mirror of https://github.com/citusdata/citus.git
Compare commits
26 Commits

Author | SHA1 | Date
---|---|---
 | bf46e10905 | 
 | dad0aa079e | 
 | 7bd6b48cc0 | 
 | 76aad29a72 | 
 | e4ae26050c | 
 | 1785ea951a | 
 | ebfcdc66a8 | 
 | 6f0cede191 | 
 | c71faad606 | 
 | 71385b8a56 | 
 | 31aa589c50 | 
 | 9127a4b488 | 
 | 16c286b1ec | 
 | beaf692c43 | 
 | 4fb2b80f3a | 
 | 4886473f35 | 
 | abb9da60fe | 
 | d883c96098 | 
 | 5153986ae4 | 
 | 19b5be65c9 | 
 | 2cf7ec5e26 | 
 | 7619b50b7f | 
 | 0626f366c1 | 
 | e7ac6fd0c1 | 
 | 341fdb32fc | 
 | c173b13b73 | 
@ -1,999 +0,0 @@
version: 2.1
orbs:
  codecov: codecov/codecov@1.1.1
  azure-cli: circleci/azure-cli@1.0.0

parameters:
  image_suffix:
    type: string
    default: '-v7e4468f'
  pg13_version:
    type: string
    default: '13.9'
  pg14_version:
    type: string
    default: '14.6'
  pg15_version:
    type: string
    default: '15.1'
  upgrade_pg_versions:
    type: string
    default: '13.9-14.6-15.1'
  style_checker_tools_version:
    type: string
    default: '0.8.18'
  flaky_test:
    type: string
    default: ''
  flaky_test_runs_per_job:
    type: integer
    default: 50
  skip_flaky_tests:
    type: boolean
    default: false
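# These pipeline parameters gate the two workflows defined at the bottom of
# this file: a non-empty `flaky_test` selects flaky_test_debugging instead of
# build_and_test. As a sketch of how such a run could be kicked off through
# CircleCI's v2 pipeline API (branch, test path, and token below are
# placeholders, not values from this config):
#
#   curl -X POST https://circleci.com/api/v2/project/gh/citusdata/citus/pipeline \
#     --header "Circle-Token: $CIRCLE_TOKEN" \
#     --header "Content-Type: application/json" \
#     --data '{"branch": "my-branch",
#              "parameters": {"flaky_test": "src/test/regress/sql/my_test.sql"}}'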
commands:
  install_extension:
    parameters:
      pg_major:
        description: 'postgres major version to use'
        type: integer
    steps:
      - run:
          name: 'Install Extension'
          command: |
            tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-<< parameters.pg_major >>.tar" --directory /

  configure:
    steps:
      - run:
          name: 'Configure'
          command: |
            chown -R circleci .
            gosu circleci ./configure --without-pg-version-check

  enable_core:
    steps:
      - run:
          name: 'Enable core dumps'
          command: |
            ulimit -c unlimited

  save_regressions:
    steps:
      - run:
          name: 'Regressions'
          command: |
            if [ -f "src/test/regress/regression.diffs" ]; then
              cat src/test/regress/regression.diffs
              exit 1
            fi
          when: on_fail
      - store_artifacts:
          name: 'Save regressions'
          path: src/test/regress/regression.diffs

  save_logs_and_results:
    steps:
      - store_artifacts:
          name: 'Save mitmproxy output (failure test specific)'
          path: src/test/regress/proxy.output
      - store_artifacts:
          name: 'Save results'
          path: src/test/regress/results/
      - store_artifacts:
          name: 'Save coordinator log'
          path: src/test/regress/tmp_check/master/log
      - store_artifacts:
          name: 'Save worker1 log'
          path: src/test/regress/tmp_check/worker.57637/log
      - store_artifacts:
          name: 'Save worker2 log'
          path: src/test/regress/tmp_check/worker.57638/log

  stack_trace:
    steps:
      - run:
          name: 'Print stack traces'
          command: |
            ./ci/print_stack_trace.sh
          when: on_fail

  coverage:
    parameters:
      flags:
        description: 'codecov flags'
        type: string
    steps:
      - codecov/upload:
          flags: '<< parameters.flags >>'
      - run:
          name: 'Create codeclimate coverage'
          command: |
            lcov --directory . --capture --output-file lcov.info
            lcov --remove lcov.info -o lcov.info '/usr/*'
            sed "s=^SF:$PWD/=SF:=g" -i lcov.info # relative paths are required by codeclimate
            mkdir -p /tmp/codeclimate
            cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/$CIRCLE_JOB.json lcov.info
      - persist_to_workspace:
          root: /tmp
          paths:
            - codeclimate/*.json
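# The `sed "s=^SF:$PWD/=SF:=g"` call in the coverage command rewrites lcov's
# absolute SF: (source file) records into repository-relative paths. A minimal
# illustration, with a made-up working directory and file name:
#
#   $ echo "SF:/home/circleci/project/src/backend/distributed/citus.c" \
#       | sed "s=^SF:/home/circleci/project/=SF:=g"
#   SF:src/backend/distributed/citus.c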
jobs:
  build:
    description: Build the citus extension
    parameters:
      pg_major:
        description: postgres major version building citus for
        type: integer
      image:
        description: docker image to use for the build
        type: string
        default: citus/extbuilder
      image_tag:
        description: tag to use for the docker image
        type: string
    docker:
      - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
    steps:
      - checkout
      - run:
          name: 'Configure, Build, and Install'
          command: |
            ./ci/build-citus.sh
      - persist_to_workspace:
          root: .
          paths:
            - build-<< parameters.pg_major >>/*
            - install-<< parameters.pg_major >>.tar

  check-style:
    docker:
      - image: 'citus/stylechecker:<< pipeline.parameters.style_checker_tools_version >><< pipeline.parameters.image_suffix >>'
    steps:
      - checkout
      - run:
          name: 'Check Style'
          command: citus_indent --check
      - run:
          name: 'Fix whitespace'
          command: ci/editorconfig.sh && git diff --exit-code
      - run:
          name: 'Remove useless declarations'
          command: ci/remove_useless_declarations.sh && git diff --cached --exit-code
      - run:
          name: 'Normalize test output'
          command: ci/normalize_expected.sh && git diff --exit-code
      - run:
          name: 'Check for C-style comments in migration files'
          command: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code
      - run:
          name: 'Check for comments that start with # character in spec files'
          command: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code
      - run:
          name: 'Check for gitignore entries for source files'
          command: ci/fix_gitignore.sh && git diff --exit-code
      - run:
          name: 'Check for lengths of changelog entries'
          command: ci/disallow_long_changelog_entries.sh
      - run:
          name: 'Check for banned C API usage'
          command: ci/banned.h.sh
      - run:
          name: 'Check for tests missing in schedules'
          command: ci/check_all_tests_are_run.sh
      - run:
          name: 'Check if all CI scripts are actually run'
          command: ci/check_all_ci_scripts_are_run.sh
      - run:
          name: 'Check if all GUCs are sorted alphabetically'
          command: ci/check_gucs_are_alphabetically_sorted.sh

  check-sql-snapshots:
    docker:
      - image: 'citus/extbuilder:latest'
    steps:
      - checkout
      - run:
          name: 'Check Snapshots'
          command: ci/check_sql_snapshots.sh

  test-pg-upgrade:
    description: Runs postgres upgrade tests
    parameters:
      old_pg_major:
        description: 'postgres major version to use before the upgrade'
        type: integer
      new_pg_major:
        description: 'postgres major version to upgrade to'
        type: integer
      image:
        description: 'docker image to use for the tests'
        type: string
        default: citus/pgupgradetester
      image_tag:
        description: 'docker image tag to use'
        type: string
    docker:
      - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - install_extension:
          pg_major: << parameters.old_pg_major >>
      - install_extension:
          pg_major: << parameters.new_pg_major >>
      - configure
      - enable_core
      - run:
          name: 'Install and test postgres upgrade'
          command: |
            gosu circleci \
              make -C src/test/regress \
                check-pg-upgrade \
                old-bindir=/usr/lib/postgresql/<< parameters.old_pg_major >>/bin \
                new-bindir=/usr/lib/postgresql/<< parameters.new_pg_major >>/bin
          no_output_timeout: 2m
      - run:
          name: 'Copy pg_upgrade logs for newData dir'
          command: |
            mkdir -p /tmp/pg_upgrade_newData_logs
            if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then
              cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs
            fi
          when: on_fail
      - store_artifacts:
          name: 'Save pg_upgrade logs for newData dir'
          path: /tmp/pg_upgrade_newData_logs
      - save_logs_and_results
      - save_regressions
      - stack_trace
      - coverage:
          flags: 'test_<< parameters.old_pg_major >>_<< parameters.new_pg_major >>,upgrade'

  test-arbitrary-configs:
    description: Runs tests on arbitrary configs
    parallelism: 6
    parameters:
      pg_major:
        description: 'postgres major version to use'
        type: integer
      image:
        description: 'docker image to use for the tests'
        type: string
        default: citus/failtester
      image_tag:
        description: 'docker image tag to use'
        type: string
    docker:
      - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
    resource_class: xlarge
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - install_extension:
          pg_major: << parameters.pg_major >>
      - configure
      - enable_core
      - run:
          name: 'Test arbitrary configs'
          command: |
            TESTS=$(src/test/regress/citus_tests/print_test_names.py | circleci tests split)
            # Our test suite expects comma separated values
            TESTS=$(echo $TESTS | tr ' ' ',')
            # TESTS will contain the subset of configs that runs on this container;
            # we use multiple containers to run the whole test suite
            gosu circleci \
              make -C src/test/regress \
                check-arbitrary-configs parallel=4 CONFIGS=$TESTS
          no_output_timeout: 2m
      - run:
          name: 'Show regressions'
          command: |
            find src/test/regress/tmp_citus_test/ -name "regression*.diffs" -exec cat {} +
            lines=$(find src/test/regress/tmp_citus_test/ -name "regression*.diffs" | wc -l)
            if [ $lines -ne 0 ]; then
              exit 1
            fi
          when: on_fail
      - run:
          name: 'Copy logfiles'
          command: |
            mkdir src/test/regress/tmp_citus_test/logfiles
            find src/test/regress/tmp_citus_test/ -name "logfile_*" -exec cp -t src/test/regress/tmp_citus_test/logfiles/ {} +
          when: on_fail
      - store_artifacts:
          name: 'Save logfiles'
          path: src/test/regress/tmp_citus_test/logfiles
      - save_logs_and_results
      - stack_trace
      - coverage:
          flags: 'test_<< parameters.pg_major >>,upgrade'

  test-citus-upgrade:
    description: Runs citus upgrade tests
    parameters:
      pg_major:
        description: 'postgres major version'
        type: integer
      image:
        description: 'docker image to use for the tests'
        type: string
        default: citus/citusupgradetester
      image_tag:
        description: 'docker image tag to use'
        type: string
    docker:
      - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - configure
      - enable_core
      - run:
          name: 'Install and test citus upgrade'
          command: |
            # run make check-citus-upgrade for all citus versions
            # the image has ${CITUS_VERSIONS} set to all versions it contains the binaries of
            for citus_version in ${CITUS_VERSIONS}; do \
              gosu circleci \
                make -C src/test/regress \
                  check-citus-upgrade \
                  bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
                  citus-old-version=${citus_version} \
                  citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
                  citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \
            done;

            # run make check-citus-upgrade-mixed for all citus versions
            # the image has ${CITUS_VERSIONS} set to all versions it contains the binaries of
            for citus_version in ${CITUS_VERSIONS}; do \
              gosu circleci \
                make -C src/test/regress \
                  check-citus-upgrade-mixed \
                  citus-old-version=${citus_version} \
                  bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
                  citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
                  citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \
            done;
          no_output_timeout: 2m
      - save_logs_and_results
      - save_regressions
      - stack_trace
      - coverage:
          flags: 'test_<< parameters.pg_major >>,upgrade'

  test-citus:
    description: Runs the common tests of citus
    parameters:
      pg_major:
        description: 'postgres major version'
        type: integer
      image:
        description: 'docker image to use for the tests'
        type: string
        default: citus/exttester
      image_tag:
        description: 'docker image tag to use'
        type: string
      make:
        description: 'make target'
        type: string
    docker:
      - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - install_extension:
          pg_major: << parameters.pg_major >>
      - configure
      - enable_core
      - run:
          name: 'Run Test'
          command: |
            gosu circleci make -C src/test/regress << parameters.make >>
          no_output_timeout: 2m
      - save_logs_and_results
      - save_regressions
      - stack_trace
      - coverage:
          flags: 'test_<< parameters.pg_major >>,<< parameters.make >>'

  tap-test-citus:
    description: Runs tap tests for citus
    parameters:
      pg_major:
        description: 'postgres major version'
        type: integer
      image:
        description: 'docker image to use for the tests'
        type: string
        default: citus/exttester
      image_tag:
        description: 'docker image tag to use'
        type: string
      suite:
        description: 'name of the tap test suite to run'
        type: string
      make:
        description: 'make target'
        type: string
        default: installcheck
    docker:
      - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - install_extension:
          pg_major: << parameters.pg_major >>
      - configure
      - enable_core
      - run:
          name: 'Run Test'
          command: |
            gosu circleci make -C src/test/<< parameters.suite >> << parameters.make >>
          no_output_timeout: 2m
      - store_artifacts:
          name: 'Save tap logs'
          path: /home/circleci/project/src/test/<< parameters.suite >>/tmp_check/log
      - save_logs_and_results
      - stack_trace
      - coverage:
          flags: 'test_<< parameters.pg_major >>,tap_<< parameters.suite >>_<< parameters.make >>'

  check-merge-to-enterprise:
    docker:
      - image: citus/extbuilder:<< pipeline.parameters.pg13_version >>
    working_directory: /home/circleci/project
    steps:
      - checkout
      - run:
          command: |
            ci/check_enterprise_merge.sh

  ch_benchmark:
    docker:
      - image: buildpack-deps:stretch
    working_directory: /home/circleci/project
    steps:
      - checkout
      - azure-cli/install
      - azure-cli/login-with-service-principal
      - run:
          command: |
            cd ./src/test/hammerdb
            sh run_hammerdb.sh citusbot_ch_benchmark_rg
          name: install dependencies and run ch_benchmark tests
          no_output_timeout: 20m

  tpcc_benchmark:
    docker:
      - image: buildpack-deps:stretch
    working_directory: /home/circleci/project
    steps:
      - checkout
      - azure-cli/install
      - azure-cli/login-with-service-principal
      - run:
          command: |
            cd ./src/test/hammerdb
            sh run_hammerdb.sh citusbot_tpcc_benchmark_rg
          name: install dependencies and run tpcc_benchmark tests
          no_output_timeout: 20m

  test-flakyness:
    description: Runs a test multiple times to see if it's flaky
    parallelism: 32
    parameters:
      pg_major:
        description: 'postgres major version'
        type: integer
      image:
        description: 'docker image to use for the tests'
        type: string
        default: citus/failtester
      image_tag:
        description: 'docker image tag to use'
        type: string
      test:
        description: 'the test file path that should be run multiple times'
        type: string
        default: ''
      runs:
        description: 'number of times that the test should be run in total'
        type: integer
        default: 8
      skip:
        description: 'A flag to bypass flaky test detection.'
        type: boolean
        default: false
    docker:
      - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
    working_directory: /home/circleci/project
    resource_class: small
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Detect regression tests that need to be run'
          command: |
            skip=<< parameters.skip >>
            if [ "$skip" = true ]; then
              echo "Skipping flaky test detection."
              circleci-agent step halt
            fi

            testForDebugging="<< parameters.test >>"

            if [ -z "$testForDebugging" ]; then
              detected_changes=$(git diff origin/main... --name-only --diff-filter=AM | (grep 'src/test/regress/sql/.*.sql\|src/test/regress/spec/.*.spec' || true))
              tests=${detected_changes}
            else
              tests=$testForDebugging;
            fi

            if [ -z "$tests" ]; then
              echo "No test found."
              circleci-agent step halt
            else
              echo "Detected tests " $tests
            fi

            echo export tests=\""$tests"\" >> "$BASH_ENV"
            source "$BASH_ENV"
      - install_extension:
          pg_major: << parameters.pg_major >>
      - configure
      - enable_core
      - run:
          name: 'Run minimal tests'
          command: |
            tests_array=($tests)
            for test in "${tests_array[@]}"
            do
              test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/")
              gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat << parameters.runs >> --use-base-schedule --use-whole-schedule-line
            done
          no_output_timeout: 2m
      - save_logs_and_results
      - save_regressions
      - stack_trace
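  # In test-flakyness above, `sed -r "s/.+\/(.+)\..+/\1/"` reduces each changed
  # test file to the bare test name that run_test.py expects, e.g. (the path is
  # illustrative):
  #
  #   $ echo "src/test/regress/sql/my_flaky_test.sql" | sed -r "s/.+\/(.+)\..+/\1/"
  #   my_flaky_test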
  upload-coverage:
    docker:
      - image: 'citus/exttester:<< pipeline.parameters.pg15_version >><< pipeline.parameters.image_suffix >>'
    working_directory: /home/circleci/project
    steps:
      - attach_workspace:
          at: .
      - run:
          name: Upload coverage results to Code Climate
          command: |
            cc-test-reporter sum-coverage codeclimate/*.json -o total.json
            cc-test-reporter upload-coverage -i total.json

workflows:
  version: 2
  flaky_test_debugging:
    when: << pipeline.parameters.flaky_test >>
    jobs:
      - build:
          name: build-flaky-15
          pg_major: 15
          image_tag: '<< pipeline.parameters.pg15_version >>'

      - test-flakyness:
          name: 'test-15_flaky'
          pg_major: 15
          image_tag: '<< pipeline.parameters.pg15_version >>'
          requires: [build-flaky-15]
          test: '<< pipeline.parameters.flaky_test >>'
          runs: << pipeline.parameters.flaky_test_runs_per_job >>

  build_and_test:
    when:
      not: << pipeline.parameters.flaky_test >>
    jobs:
      - build:
          name: build-13
          pg_major: 13
          image_tag: '<< pipeline.parameters.pg13_version >>'
      - build:
          name: build-14
          pg_major: 14
          image_tag: '<< pipeline.parameters.pg14_version >>'
      - build:
          name: build-15
          pg_major: 15
          image_tag: '<< pipeline.parameters.pg15_version >>'

      - check-style
      - check-sql-snapshots

      - test-citus: &test-citus-13
          name: 'test-13_check-multi'
          make: check-multi
          pg_major: 13
          image_tag: '<< pipeline.parameters.pg13_version >>'
          requires: [build-13]
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-multi-1'
          make: check-multi-1
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-mx'
          make: check-multi-mx
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-vanilla'
          make: check-vanilla
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-isolation'
          make: check-isolation
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-operations'
          make: check-operations
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-follower-cluster'
          make: check-follower-cluster
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-columnar'
          make: check-columnar
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-columnar-isolation'
          make: check-columnar-isolation
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-failure'
          image: citus/failtester
          make: check-failure
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-enterprise'
          make: check-enterprise
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-enterprise-isolation'
          make: check-enterprise-isolation
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-enterprise-isolation-logicalrep-1'
          make: check-enterprise-isolation-logicalrep-1
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-enterprise-isolation-logicalrep-2'
          make: check-enterprise-isolation-logicalrep-2
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-enterprise-isolation-logicalrep-3'
          make: check-enterprise-isolation-logicalrep-3
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-enterprise-failure'
          image: citus/failtester
          make: check-enterprise-failure
      - test-citus:
          <<: *test-citus-13
          name: 'test-13_check-split'
          make: check-split

      - test-citus: &test-citus-14
          name: 'test-14_check-split'
          make: check-split
          pg_major: 14
          image_tag: '<< pipeline.parameters.pg14_version >>'
          requires: [build-14]
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-enterprise'
          make: check-enterprise
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-enterprise-isolation'
          make: check-enterprise-isolation
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-enterprise-isolation-logicalrep-1'
          make: check-enterprise-isolation-logicalrep-1
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-enterprise-isolation-logicalrep-2'
          make: check-enterprise-isolation-logicalrep-2
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-enterprise-isolation-logicalrep-3'
          make: check-enterprise-isolation-logicalrep-3
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-enterprise-failure'
          image: citus/failtester
          make: check-enterprise-failure
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-multi'
          make: check-multi
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-multi-1'
          make: check-multi-1
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-mx'
          make: check-multi-mx
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-vanilla'
          make: check-vanilla
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-isolation'
          make: check-isolation
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-operations'
          make: check-operations
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-follower-cluster'
          make: check-follower-cluster
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-columnar'
          make: check-columnar
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-columnar-isolation'
          make: check-columnar-isolation
      - test-citus:
          <<: *test-citus-14
          name: 'test-14_check-failure'
          image: citus/failtester
          make: check-failure

      - test-citus: &test-citus-15
          name: 'test-15_check-split'
          make: check-split
          pg_major: 15
          image_tag: '<< pipeline.parameters.pg15_version >>'
          requires: [build-15]
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-enterprise'
          make: check-enterprise
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-enterprise-isolation'
          make: check-enterprise-isolation
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-enterprise-isolation-logicalrep-1'
          make: check-enterprise-isolation-logicalrep-1
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-enterprise-isolation-logicalrep-2'
          make: check-enterprise-isolation-logicalrep-2
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-enterprise-isolation-logicalrep-3'
          make: check-enterprise-isolation-logicalrep-3
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-enterprise-failure'
          image: citus/failtester
          make: check-enterprise-failure
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-multi'
          make: check-multi
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-multi-1'
          make: check-multi-1
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-mx'
          make: check-multi-mx
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-vanilla'
          make: check-vanilla
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-isolation'
          make: check-isolation
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-operations'
          make: check-operations
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-follower-cluster'
          make: check-follower-cluster
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-columnar'
          make: check-columnar
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-columnar-isolation'
          make: check-columnar-isolation
      - test-citus:
          <<: *test-citus-15
          name: 'test-15_check-failure'
          image: citus/failtester
          make: check-failure

      - tap-test-citus: &tap-test-citus-13
          name: 'test-13_tap-recovery'
          suite: recovery
          pg_major: 13
          image_tag: '<< pipeline.parameters.pg13_version >>'
          requires: [build-13]
      - tap-test-citus:
          <<: *tap-test-citus-13
          name: 'test-13_tap-columnar-freezing'
          suite: columnar_freezing

      - tap-test-citus: &tap-test-citus-14
          name: 'test-14_tap-recovery'
          suite: recovery
          pg_major: 14
          image_tag: '<< pipeline.parameters.pg14_version >>'
          requires: [build-14]
      - tap-test-citus:
          <<: *tap-test-citus-14
          name: 'test-14_tap-columnar-freezing'
          suite: columnar_freezing

      - tap-test-citus: &tap-test-citus-15
          name: 'test-15_tap-recovery'
          suite: recovery
          pg_major: 15
          image_tag: '<< pipeline.parameters.pg15_version >>'
          requires: [build-15]
      - tap-test-citus:
          <<: *tap-test-citus-15
          name: 'test-15_tap-columnar-freezing'
          suite: columnar_freezing

      - test-arbitrary-configs:
          name: 'test-13_check-arbitrary-configs'
          pg_major: 13
          image_tag: '<< pipeline.parameters.pg13_version >>'
          requires: [build-13]

      - test-arbitrary-configs:
          name: 'test-14_check-arbitrary-configs'
          pg_major: 14
          image_tag: '<< pipeline.parameters.pg14_version >>'
          requires: [build-14]

      - test-arbitrary-configs:
          name: 'test-15_check-arbitrary-configs'
          pg_major: 15
          image_tag: '<< pipeline.parameters.pg15_version >>'
          requires: [build-15]

      - test-pg-upgrade:
          name: 'test-13-14_check-pg-upgrade'
          old_pg_major: 13
          new_pg_major: 14
          image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
          requires: [build-13, build-14]

      - test-pg-upgrade:
          name: 'test-14-15_check-pg-upgrade'
          old_pg_major: 14
          new_pg_major: 15
          image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
          requires: [build-14, build-15]

      - test-citus-upgrade:
          name: test-13_check-citus-upgrade
          pg_major: 13
          image_tag: '<< pipeline.parameters.pg13_version >>'
          requires: [build-13]

      - upload-coverage:
          requires:
            - test-13_check-multi
            - test-13_check-multi-1
            - test-13_check-mx
            - test-13_check-vanilla
            - test-13_check-isolation
            - test-13_check-operations
            - test-13_check-follower-cluster
            - test-13_check-columnar
            - test-13_check-columnar-isolation
            - test-13_tap-recovery
            - test-13_tap-columnar-freezing
            - test-13_check-failure
            - test-13_check-enterprise
            - test-13_check-enterprise-isolation
            - test-13_check-enterprise-isolation-logicalrep-1
            - test-13_check-enterprise-isolation-logicalrep-2
            - test-13_check-enterprise-isolation-logicalrep-3
            - test-13_check-enterprise-failure
            - test-13_check-split
            - test-13_check-arbitrary-configs
            - test-14_check-multi
            - test-14_check-multi-1
            - test-14_check-mx
            - test-14_check-vanilla
            - test-14_check-isolation
            - test-14_check-operations
            - test-14_check-follower-cluster
            - test-14_check-columnar
            - test-14_check-columnar-isolation
            - test-14_tap-recovery
            - test-14_tap-columnar-freezing
            - test-14_check-failure
            - test-14_check-enterprise
            - test-14_check-enterprise-isolation
            - test-14_check-enterprise-isolation-logicalrep-1
            - test-14_check-enterprise-isolation-logicalrep-2
            - test-14_check-enterprise-isolation-logicalrep-3
            - test-14_check-enterprise-failure
            - test-14_check-split
            - test-14_check-arbitrary-configs
            - test-15_check-multi
            - test-15_check-multi-1
            - test-15_check-mx
            - test-15_check-vanilla
            - test-15_check-isolation
            - test-15_check-operations
            - test-15_check-follower-cluster
            - test-15_check-columnar
            - test-15_check-columnar-isolation
            - test-15_tap-recovery
            - test-15_tap-columnar-freezing
            - test-15_check-failure
            - test-15_check-enterprise
            - test-15_check-enterprise-isolation
            - test-15_check-enterprise-isolation-logicalrep-1
            - test-15_check-enterprise-isolation-logicalrep-2
            - test-15_check-enterprise-isolation-logicalrep-3
            - test-15_check-enterprise-failure
            - test-15_check-split
            - test-15_check-arbitrary-configs
            - test-13-14_check-pg-upgrade
            - test-14-15_check-pg-upgrade
            - test-13_check-citus-upgrade

      - ch_benchmark:
          requires: [build-13]
          filters:
            branches:
              only:
                - /ch_benchmark\/.*/ # match with ch_benchmark/ prefix
      - tpcc_benchmark:
          requires: [build-13]
          filters:
            branches:
              only:
                - /tpcc_benchmark\/.*/ # match with tpcc_benchmark/ prefix
      - test-flakyness:
          name: 'test-15_flaky'
          pg_major: 15
          image_tag: '<< pipeline.parameters.pg15_version >>'
          requires: [build-15]
          skip: << pipeline.parameters.skip_flaky_tests >>
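The workflow lists above lean heavily on YAML anchors: `&test-citus-13` names a mapping once, and each later `<<: *test-citus-13` merge key copies it, with sibling keys such as `name:` and `make:` overriding the copied values. A minimal standalone sketch of the pattern, using entries taken from the file:

    - test-citus: &test-citus-13     # anchor: remember this mapping
        name: 'test-13_check-multi'
        make: check-multi
        pg_major: 13
    - test-citus:
        <<: *test-citus-13           # merge key: reuse the anchored mapping
        name: 'test-13_check-multi-1'   # overrides the merged name
        make: check-multi-1             # overrides the merged make target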
@ -0,0 +1,23 @@
name: 'Parallelization matrix'
inputs:
  count:
    required: false
    default: 32
outputs:
  json:
    value: ${{ steps.generate_matrix.outputs.json }}
runs:
  using: "composite"
  steps:
    - name: Generate parallelization matrix
      id: generate_matrix
      shell: bash
      run: |-
        json_array="{\"include\": ["
        for ((i = 1; i <= ${{ inputs.count }}; i++)); do
          json_array+="{\"id\":\"$i\"},"
        done
        json_array=${json_array%,}
        json_array+=" ]}"
        echo "json=$json_array" >> "$GITHUB_OUTPUT"
        echo "json=$json_array"
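For count=3 the generator above emits {"include": [{"id":"1"},{"id":"2"},{"id":"3"} ]} on its json output. A consuming job, such as prepare_parallelization_matrix_32 further down, expands that with fromJson; a sketch of the fan-out, where the job name fan-out is illustrative:

    fan-out:
      needs: prepare_parallelization_matrix_32
      runs-on: ubuntu-20.04
      strategy:
        matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
      steps:
        - run: echo "parallel runner ${{ matrix.id }}"   # runs once per id, 1..32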
@ -0,0 +1,38 @@
name: save_logs_and_results
inputs:
  folder:
    required: false
    default: "log"
runs:
  using: composite
  steps:
    - uses: actions/upload-artifact@v3.1.1
      name: Upload logs
      with:
        name: ${{ inputs.folder }}
        if-no-files-found: ignore
        path: |
          src/test/**/proxy.output
          src/test/**/results/
          src/test/**/tmp_check/master/log
          src/test/**/tmp_check/worker.57638/log
          src/test/**/tmp_check/worker.57637/log
          src/test/**/*.diffs
          src/test/**/out/ddls.sql
          src/test/**/out/queries.sql
          src/test/**/logfile_*
          /tmp/pg_upgrade_newData_logs
    - name: Publish regression.diffs
      run: |-
        diffs="$(find src/test/regress -name "*.diffs" -exec cat {} \;)"
        if ! [ -z "$diffs" ]; then
          echo '```diff' >> $GITHUB_STEP_SUMMARY
          echo -E "$diffs" >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY
          echo -E $diffs
        fi
      shell: bash
    - name: Print stack traces
      run: "./ci/print_stack_trace.sh"
      if: failure()
      shell: bash
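The Publish regression.diffs step works because anything appended to the file named by $GITHUB_STEP_SUMMARY is rendered as Markdown on the run's summary page, so wrapping the concatenated diffs in a ```diff fence shows them with diff highlighting; `if-no-files-found: ignore` on the upload step keeps jobs green when a suite produced none of the listed files. A tiny illustrative step using the same mechanism:

    - name: Write a summary note   # illustrative, not part of this action
      shell: bash
      run: |
        echo '### Regression diffs'  >> "$GITHUB_STEP_SUMMARY"
        echo 'No diffs this run.'    >> "$GITHUB_STEP_SUMMARY"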
@ -0,0 +1,35 @@
name: setup_extension
inputs:
  pg_major:
    required: false
  skip_installation:
    required: false
    default: false
    type: boolean
runs:
  using: composite
  steps:
    - name: Expose $PG_MAJOR to Github Env
      run: |-
        if [ -z "${{ inputs.pg_major }}" ]; then
          echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
        else
          echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
        fi
      shell: bash
    - uses: actions/download-artifact@v3.0.1
      with:
        name: build-${{ env.PG_MAJOR }}
    - name: Install Extension
      if: ${{ inputs.skip_installation == 'false' }}
      run: tar xfv "install-$PG_MAJOR.tar" --directory /
      shell: bash
    - name: Configure
      run: |-
        chown -R circleci .
        git config --global --add safe.directory ${GITHUB_WORKSPACE}
        gosu circleci ./configure --without-pg-version-check
      shell: bash
    - name: Enable core dumps
      run: ulimit -c unlimited
      shell: bash
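Jobs consume this composite action by checking out first and then referencing its local path; pg_major is optional and, when omitted, the first step falls back to the container's own $PG_MAJOR. This mirrors how the workflow below calls it:

    steps:
      - uses: actions/checkout@v3.5.0
      - uses: "./.github/actions/setup_extension"
        with:
          pg_major: "14"   # optional; if omitted, the container's $PG_MAJOR is used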
@ -0,0 +1,27 @@
name: coverage
inputs:
  flags:
    required: false
  codecov_token:
    required: true
runs:
  using: composite
  steps:
    - uses: codecov/codecov-action@v3
      with:
        flags: ${{ inputs.flags }}
        token: ${{ inputs.codecov_token }}
        verbose: true
        gcov: true
    - name: Create codeclimate coverage
      run: |-
        lcov --directory . --capture --output-file lcov.info
        lcov --remove lcov.info -o lcov.info '/usr/*'
        sed "s=^SF:$PWD/=SF:=g" -i lcov.info # relative paths are required by codeclimate
        mkdir -p /tmp/codeclimate
        cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
      shell: bash
    - uses: actions/upload-artifact@v3.1.1
      with:
        path: "/tmp/codeclimate/*.json"
        name: codeclimate
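The Codecov token is passed in as an input because the secrets context is not available inside composite actions; callers forward it explicitly. Assuming this file is the upload_coverage action referenced in the workflow below, a test job invokes it like this:

    - uses: "./.github/actions/upload_coverage"
      if: always()
      with:
        flags: ${{ env.PG_MAJOR }}_${{ matrix.suite }}_${{ matrix.make }}
        codecov_token: ${{ secrets.CODECOV_TOKEN }}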
@ -0,0 +1,490 @@
name: Build & Test
run-name: Build & Test - ${{ github.event.pull_request.title || github.ref_name }}
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
on:
  workflow_dispatch:
    inputs:
      skip_test_flakyness:
        required: false
        default: false
        type: boolean
  pull_request:
    types: [opened, reopened, synchronize]
jobs:
  # Since GHA does not interpolate env variables in matrix context, we need to
  # define them in a separate job and use them in other jobs.
  params:
    runs-on: ubuntu-latest
    name: Initialize parameters
    outputs:
      build_image_name: "citus/extbuilder"
      test_image_name: "citus/exttester"
      citusupgrade_image_name: "citus/citusupgradetester"
      fail_test_image_name: "citus/failtester"
      pgupgrade_image_name: "citus/pgupgradetester"
      style_checker_image_name: "citus/stylechecker"
      style_checker_tools_version: "0.8.18"
      image_suffix: "-vc4b1573"
      pg13_version: '{ "major": "13", "full": "13.10" }'
      pg14_version: '{ "major": "14", "full": "14.7" }'
      pg15_version: '{ "major": "15", "full": "15.2" }'
      upgrade_pg_versions: "13.10-14.7-15.2"
    steps:
      # Since a GHA job needs at least one step, we use a noop step here.
      - name: Set up parameters
        run: echo 'noop'
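  # Job outputs are plain strings, so the pg*_version outputs above are small
  # JSON documents that downstream jobs pick apart with fromJson, e.g.:
  #   fromJson(needs.params.outputs.pg14_version).major -> "14"
  #   fromJson(needs.params.outputs.pg14_version).full  -> "14.7"
  # which is exactly how the build and test jobs below derive their image tags
  # and display names.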
  check-sql-snapshots:
    needs: params
    runs-on: ubuntu-20.04
    container:
      image: ${{ needs.params.outputs.build_image_name }}:latest
      options: --user root
    steps:
      - uses: actions/checkout@v3.5.0
      - name: Check Snapshots
        run: |
          git config --global --add safe.directory ${GITHUB_WORKSPACE}
          ci/check_sql_snapshots.sh
  check-style:
    needs: params
    runs-on: ubuntu-20.04
    container:
      image: ${{ needs.params.outputs.style_checker_image_name }}:${{ needs.params.outputs.style_checker_tools_version }}${{ needs.params.outputs.image_suffix }}
    steps:
      - name: Set git safe.directory
        run: |
          git config --global --add safe.directory ${GITHUB_WORKSPACE}
      - uses: actions/checkout@v3.5.0
        with:
          fetch-depth: 0
      - name: Check C Style
        run: citus_indent --check
      - name: Fix whitespace
        run: ci/editorconfig.sh && git diff --exit-code
      - name: Remove useless declarations
        run: ci/remove_useless_declarations.sh && git diff --cached --exit-code
      - name: Normalize test output
        run: ci/normalize_expected.sh && git diff --exit-code
      - name: Check for C-style comments in migration files
        run: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code
      - name: 'Check for comments that start with # character in spec files'
        run: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code
      - name: Check for gitignore entries for source files
        run: ci/fix_gitignore.sh && git diff --exit-code
      - name: Check for lengths of changelog entries
        run: ci/disallow_long_changelog_entries.sh
      - name: Check for banned C API usage
        run: ci/banned.h.sh
      - name: Check for tests missing in schedules
        run: ci/check_all_tests_are_run.sh
      - name: Check if all CI scripts are actually run
        run: ci/check_all_ci_scripts_are_run.sh
      - name: Check if all GUCs are sorted alphabetically
        run: ci/check_gucs_are_alphabetically_sorted.sh
  build:
    needs: params
    name: Build for PG${{ fromJson(matrix.pg_version).major }}
    strategy:
      fail-fast: false
      matrix:
        image_name:
          - ${{ needs.params.outputs.build_image_name }}
        image_suffix:
          - ${{ needs.params.outputs.image_suffix }}
        pg_version:
          - ${{ needs.params.outputs.pg13_version }}
          - ${{ needs.params.outputs.pg14_version }}
          - ${{ needs.params.outputs.pg15_version }}
    runs-on: ubuntu-20.04
    container:
      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
      options: --user root
    steps:
      - uses: actions/checkout@v3.5.0
      - name: Expose $PG_MAJOR to Github Env
        run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
        shell: bash
      - name: Build
        run: "./ci/build-citus.sh"
        shell: bash
      - uses: actions/upload-artifact@v3.1.1
        with:
          name: build-${{ env.PG_MAJOR }}
          path: |-
            ./build-${{ env.PG_MAJOR }}/*
            ./install-${{ env.PG_MAJOR }}.tar
  test-citus:
    name: PG${{ fromJson(matrix.pg_version).major }} - ${{ matrix.make }}
    strategy:
      fail-fast: false
      matrix:
        suite:
          - regress
        image_name:
          - ${{ needs.params.outputs.test_image_name }}
        pg_version:
          - ${{ needs.params.outputs.pg13_version }}
          - ${{ needs.params.outputs.pg14_version }}
          - ${{ needs.params.outputs.pg15_version }}
        make:
          - check-split
          - check-multi
          - check-multi-1
          - check-multi-mx
          - check-vanilla
          - check-isolation
          - check-operations
          - check-follower-cluster
          - check-columnar
          - check-columnar-isolation
          - check-enterprise
          - check-enterprise-isolation
          - check-enterprise-isolation-logicalrep-1
          - check-enterprise-isolation-logicalrep-2
          - check-enterprise-isolation-logicalrep-3
        include:
          - make: check-failure
            pg_version: ${{ needs.params.outputs.pg13_version }}
            suite: regress
            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: check-failure
            pg_version: ${{ needs.params.outputs.pg14_version }}
            suite: regress
            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: check-failure
            pg_version: ${{ needs.params.outputs.pg15_version }}
            suite: regress
            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: check-enterprise-failure
            pg_version: ${{ needs.params.outputs.pg13_version }}
            suite: regress
            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: check-enterprise-failure
            pg_version: ${{ needs.params.outputs.pg14_version }}
            suite: regress
            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: check-enterprise-failure
            pg_version: ${{ needs.params.outputs.pg15_version }}
            suite: regress
            image_name: ${{ needs.params.outputs.fail_test_image_name }}
          - make: installcheck
            suite: recovery
            image_name: ${{ needs.params.outputs.test_image_name }}
            pg_version: ${{ needs.params.outputs.pg13_version }}
          - make: installcheck
            suite: recovery
            image_name: ${{ needs.params.outputs.test_image_name }}
            pg_version: ${{ needs.params.outputs.pg14_version }}
          - make: installcheck
            suite: recovery
            image_name: ${{ needs.params.outputs.test_image_name }}
            pg_version: ${{ needs.params.outputs.pg15_version }}
          - make: installcheck
            suite: columnar_freezing
            image_name: ${{ needs.params.outputs.test_image_name }}
            pg_version: ${{ needs.params.outputs.pg13_version }}
          - make: installcheck
            suite: columnar_freezing
            image_name: ${{ needs.params.outputs.test_image_name }}
            pg_version: ${{ needs.params.outputs.pg14_version }}
          - make: installcheck
            suite: columnar_freezing
            image_name: ${{ needs.params.outputs.test_image_name }}
            pg_version: ${{ needs.params.outputs.pg15_version }}
    runs-on: ubuntu-20.04
    container:
      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
      options: --user root --dns=8.8.8.8
      # GitHub creates a default network for each job, so we pass --dns= to
      # keep DNS settings similar to our other CI systems and local machines.
      # Otherwise, we may see different results.
    needs:
      - params
      - build
    steps:
      - uses: actions/checkout@v3.5.0
      - uses: "./.github/actions/setup_extension"
      - name: Run Test
        run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }}
        timeout-minutes: 20
      - uses: "./.github/actions/save_logs_and_results"
        if: always()
        with:
          folder: ${{ fromJson(matrix.pg_version).major }}_${{ matrix.make }}
      - uses: "./.github/actions/upload_coverage"
        if: always()
        with:
          flags: ${{ env.PG_MAJOR }}_${{ matrix.suite }}_${{ matrix.make }}
          codecov_token: ${{ secrets.CODECOV_TOKEN }}
  test-arbitrary-configs:
    name: PG${{ fromJson(matrix.pg_version).major }} - check-arbitrary-configs-${{ matrix.parallel }}
    runs-on: ["self-hosted", "1ES.Pool=1es-gha-citusdata-pool"]
    container:
      image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
      options: --user root
    needs:
      - params
      - build
    strategy:
      fail-fast: false
      matrix:
        image_name:
          - ${{ needs.params.outputs.fail_test_image_name }}
        pg_version:
          - ${{ needs.params.outputs.pg13_version }}
          - ${{ needs.params.outputs.pg14_version }}
          - ${{ needs.params.outputs.pg15_version }}
        parallel: [0, 1, 2, 3, 4, 5] # workaround for running 6 parallel jobs
    steps:
      - uses: actions/checkout@v3.5.0
      - uses: "./.github/actions/setup_extension"
      - name: Test arbitrary configs
        run: |-
          # we use parallel jobs to split the tests into 6 parts and run them in parallel
          # the script below extracts the tests for the current job
          N=6 # Total number of jobs (see matrix.parallel)
          X=${{ matrix.parallel }} # Current job number
          TESTS=$(src/test/regress/citus_tests/print_test_names.py |
            tr '\n' ',' | awk -v N="$N" -v X="$X" -F, '{
              split("", parts)
              for (i = 1; i <= NF; i++) {
                parts[i % N] = parts[i % N] $i ","
              }
              print substr(parts[X], 1, length(parts[X])-1)
            }')
          echo $TESTS
          gosu circleci \
            make -C src/test/regress \
              check-arbitrary-configs parallel=4 CONFIGS=$TESTS
      - uses: "./.github/actions/save_logs_and_results"
        if: always()
      - uses: "./.github/actions/upload_coverage"
        if: always()
        with:
          flags: ${{ env.pg_major }}_upgrade
          codecov_token: ${{ secrets.CODECOV_TOKEN }}
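  # The awk program above deals the comma-separated config names round-robin
  # into N buckets and prints bucket X. A quick local sanity check with
  # made-up names (N=2, bucket X=1):
  #
  #   $ printf 'cfg_a\ncfg_b\ncfg_c\ncfg_d\ncfg_e\n' | tr '\n' ',' |
  #       awk -v N=2 -v X=1 -F, '{ split("", parts)
  #         for (i = 1; i <= NF; i++) { parts[i % N] = parts[i % N] $i "," }
  #         print substr(parts[X], 1, length(parts[X])-1) }'
  #   cfg_a,cfg_c,cfg_e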
  test-pg-upgrade:
    name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
    runs-on: ubuntu-20.04
    container:
      image: "${{ needs.params.outputs.pgupgrade_image_name }}:${{ needs.params.outputs.upgrade_pg_versions }}${{ needs.params.outputs.image_suffix }}"
      options: --user root
    needs:
      - params
      - build
    strategy:
      fail-fast: false
      matrix:
        include:
          - old_pg_major: 13
            new_pg_major: 14
          - old_pg_major: 14
            new_pg_major: 15
    env:
      old_pg_major: ${{ matrix.old_pg_major }}
      new_pg_major: ${{ matrix.new_pg_major }}
    steps:
      - uses: actions/checkout@v3.5.0
      - uses: "./.github/actions/setup_extension"
        with:
          pg_major: "${{ env.old_pg_major }}"
      - uses: "./.github/actions/setup_extension"
        with:
          pg_major: "${{ env.new_pg_major }}"
      - name: Install and test postgres upgrade
        run: |-
          gosu circleci \
            make -C src/test/regress \
              check-pg-upgrade \
              old-bindir=/usr/lib/postgresql/${{ env.old_pg_major }}/bin \
              new-bindir=/usr/lib/postgresql/${{ env.new_pg_major }}/bin
      - name: Copy pg_upgrade logs for newData dir
        run: |-
          mkdir -p /tmp/pg_upgrade_newData_logs
          if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then
            cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs
          fi
        if: failure()
      - uses: "./.github/actions/save_logs_and_results"
        if: always()
      - uses: "./.github/actions/upload_coverage"
        if: always()
        with:
          flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
          codecov_token: ${{ secrets.CODECOV_TOKEN }}
  test-citus-upgrade:
    name: PG${{ fromJson(needs.params.outputs.pg13_version).major }} - check-citus-upgrade
    runs-on: ubuntu-20.04
    container:
      image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg13_version).full }}${{ needs.params.outputs.image_suffix }}"
      options: --user root
    needs:
      - params
      - build
    steps:
      - uses: actions/checkout@v3.5.0
      - uses: "./.github/actions/setup_extension"
        with:
          skip_installation: true
      - name: Install and test citus upgrade
        run: |-
          # run make check-citus-upgrade for all citus versions
          # the image has ${CITUS_VERSIONS} set to all versions it contains the binaries of
          for citus_version in ${CITUS_VERSIONS}; do \
            gosu circleci \
              make -C src/test/regress \
                check-citus-upgrade \
                bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
                citus-old-version=${citus_version} \
                citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
                citus-post-tar=${GITHUB_WORKSPACE}/install-$PG_MAJOR.tar; \
          done;
          # run make check-citus-upgrade-mixed for all citus versions
          # the image has ${CITUS_VERSIONS} set to all versions it contains the binaries of
          for citus_version in ${CITUS_VERSIONS}; do \
            gosu circleci \
              make -C src/test/regress \
                check-citus-upgrade-mixed \
                citus-old-version=${citus_version} \
                bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
                citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
                citus-post-tar=${GITHUB_WORKSPACE}/install-$PG_MAJOR.tar; \
          done;
      - uses: "./.github/actions/save_logs_and_results"
        if: always()
      - uses: "./.github/actions/upload_coverage"
        if: always()
        with:
          flags: ${{ env.pg_major }}_upgrade
          codecov_token: ${{ secrets.CODECOV_TOKEN }}
  upload-coverage:
    if: always()
    env:
      CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
    runs-on: ubuntu-20.04
    container:
      image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg15_version).full }}${{ needs.params.outputs.image_suffix }}
    needs:
      - params
      - test-citus
      - test-arbitrary-configs
      - test-citus-upgrade
      - test-pg-upgrade
    steps:
      - uses: actions/download-artifact@v3.0.1
        with:
          name: "codeclimate"
          path: "codeclimate"
      - name: Upload coverage results to Code Climate
        run: |-
          cc-test-reporter sum-coverage codeclimate/*.json -o total.json
          cc-test-reporter upload-coverage -i total.json
ch_benchmark:
|
||||||
|
name: CH Benchmark
|
||||||
|
if: startsWith(github.ref, 'refs/heads/ch_benchmark/')
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
needs:
|
||||||
|
- build
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: azure/login@v1
|
||||||
|
with:
|
||||||
|
creds: ${{ secrets.AZURE_CREDENTIALS }}
|
||||||
|
- name: install dependencies and run ch_benchmark tests
|
||||||
|
uses: azure/CLI@v1
|
||||||
|
with:
|
||||||
|
inlineScript: |
|
||||||
|
cd ./src/test/hammerdb
|
||||||
|
chmod +x run_hammerdb.sh
|
||||||
|
run_hammerdb.sh citusbot_ch_benchmark_rg
|
||||||
|
tpcc_benchmark:
|
||||||
|
name: TPCC Benchmark
|
||||||
|
if: startsWith(github.ref, 'refs/heads/tpcc_benchmark/')
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
needs:
|
||||||
|
- build
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: azure/login@v1
|
||||||
|
with:
|
||||||
|
creds: ${{ secrets.AZURE_CREDENTIALS }}
|
||||||
|
- name: install dependencies and run tpcc_benchmark tests
|
||||||
|
uses: azure/CLI@v1
|
||||||
|
with:
|
||||||
|
inlineScript: |
|
||||||
|
cd ./src/test/hammerdb
|
||||||
|
chmod +x run_hammerdb.sh
|
||||||
|
run_hammerdb.sh citusbot_tpcc_benchmark_rg
|
||||||
|
prepare_parallelization_matrix_32:
|
||||||
|
name: Parallel 32
|
||||||
|
if: ${{ needs.test-flakyness-pre.outputs.tests != ''}}
|
||||||
|
needs: test-flakyness-pre
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
outputs:
|
||||||
|
json: ${{ steps.parallelization.outputs.json }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: "./.github/actions/parallelization"
|
||||||
|
id: parallelization
|
||||||
|
with:
|
||||||
|
count: 32
|
||||||
|
test-flakyness-pre:
|
||||||
|
name: Detect regression tests need to be ran
|
||||||
|
if: ${{ !inputs.skip_test_flakyness }}}
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
needs: build
|
||||||
|
outputs:
|
||||||
|
tests: ${{ steps.detect-regression-tests.outputs.tests }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
- name: Detect regression tests need to be ran
|
||||||
|
id: detect-regression-tests
|
||||||
|
run: |-
|
||||||
|
detected_changes=$(git diff origin/release-11.2... --name-only --diff-filter=AM | (grep 'src/test/regress/sql/.*\.sql\|src/test/regress/spec/.*\.spec\|src/test/regress/citus_tests/test/test_.*\.py' || true))
|
||||||
|
tests=${detected_changes}
|
||||||
|
if [ -z "$tests" ]; then
|
||||||
|
echo "No test found."
|
||||||
|
else
|
||||||
|
echo "Detected tests " $tests
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo 'tests<<EOF' >> $GITHUB_OUTPUT
|
||||||
|
echo "$tests" >> "$GITHUB_OUTPUT"
|
||||||
|
echo 'EOF' >> $GITHUB_OUTPUT
|
||||||
|
test-flakyness:
|
||||||
|
if: false
|
||||||
|
name: Test flakyness
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
container:
|
||||||
|
image: ${{ needs.params.outputs.fail_test_image_name }}:${{ needs.params.outputs.pg15_version }}${{ needs.params.outputs.image_suffix }}
|
||||||
|
options: --user root
|
||||||
|
env:
|
||||||
|
runs: 8
|
||||||
|
needs:
|
||||||
|
- params
|
||||||
|
- build
|
||||||
|
- test-flakyness-pre
|
||||||
|
- prepare_parallelization_matrix_32
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.0
|
||||||
|
- uses: actions/download-artifact@v3.0.1
|
||||||
|
- uses: "./.github/actions/setup_extension"
|
||||||
|
- name: Run minimal tests
|
||||||
|
run: |-
|
||||||
|
tests="${{ needs.test-flakyness-pre.outputs.tests }}"
|
||||||
|
tests_array=($tests)
|
||||||
|
for test in "${tests_array[@]}"
|
||||||
|
do
|
||||||
|
test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/")
|
||||||
|
gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
|
||||||
|
done
|
||||||
|
shell: bash
|
||||||
|
- uses: "./.github/actions/save_logs_and_results"
|
||||||
|
if: always()
|
@@ -0,0 +1,79 @@
name: Flaky test debugging
run-name: Flaky test debugging - ${{ inputs.flaky_test }} (${{ inputs.flaky_test_runs_per_job }}x${{ inputs.flaky_test_parallel_jobs }})
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
on:
  workflow_dispatch:
    inputs:
      flaky_test:
        required: true
        type: string
        description: Test to run
      flaky_test_runs_per_job:
        required: false
        default: 8
        type: number
        description: Number of times to run the test
      flaky_test_parallel_jobs:
        required: false
        default: 32
        type: number
        description: Number of parallel jobs to run
jobs:
  build:
    name: Build Citus
    runs-on: ubuntu-latest
    container:
      image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
      options: --user root
    steps:
    - uses: actions/checkout@v3.5.0
    - name: Configure, Build, and Install
      run: |
        echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
        ./ci/build-citus.sh
      shell: bash
    - uses: actions/upload-artifact@v3.1.1
      with:
        name: build-${{ env.PG_MAJOR }}
        path: |-
          ./build-${{ env.PG_MAJOR }}/*
          ./install-${{ env.PG_MAJOR }}.tar
  prepare_parallelization_matrix:
    name: Prepare parallelization matrix
    runs-on: ubuntu-latest
    outputs:
      json: ${{ steps.parallelization.outputs.json }}
    steps:
    - uses: actions/checkout@v3.5.0
    - uses: "./.github/actions/parallelization"
      id: parallelization
      with:
        count: ${{ inputs.flaky_test_parallel_jobs }}
  test_flakyness:
    name: Test flakiness
    runs-on: ubuntu-latest
    container:
      image: ${{ vars.fail_test_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
      options: --user root
    needs: [build, prepare_parallelization_matrix]
    env:
      test: "${{ inputs.flaky_test }}"
      runs: "${{ inputs.flaky_test_runs_per_job }}"
      skip: false
    strategy:
      fail-fast: false
      matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
    steps:
    - uses: actions/checkout@v3.5.0
    - uses: "./.github/actions/setup_extension"
    - name: Run minimal tests
      run: |-
        gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
      shell: bash
    - uses: "./.github/actions/save_logs_and_results"
      if: always()
      with:
        folder: ${{ matrix.id }}
@@ -20,14 +20,16 @@ jobs:
     - name: Get Postgres Versions
       id: get-postgres-versions
       run: |
-        # Postgres versions are stored in .circleci/config.yml file in "build-[pg-version] format. Below command
-        # extracts the versions and get the unique values.
-        pg_versions=`grep -Eo 'build-[[:digit:]]{2}' .circleci/config.yml|sed -e "s/^build-//"|sort|uniq|tr '\n' ','| head -c -1`
+        set -euxo pipefail
+        # Postgres versions are stored in .github/workflows/build_and_test.yml
+        # file in json strings with major and full keys.
+        # Below command extracts the versions and get the unique values.
+        pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' | sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
         pg_versions_array="[ ${pg_versions} ]"
         echo "Supported PG Versions: ${pg_versions_array}"
         # Below line is needed to set the output variable to be used in the next job
         echo "pg_versions=${pg_versions_array}" >> $GITHUB_OUTPUT
+      shell: bash
   rpm_build_tests:
     name: rpm_build_tests
     needs: get_postgres_versions_from_file
@@ -43,7 +45,7 @@ jobs:
           - oraclelinux-7
           - oraclelinux-8
           - centos-7
-          - centos-8
+          - almalinux-8
           - almalinux-9
         POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
@@ -109,10 +111,8 @@ jobs:
           - debian-buster-all
           - debian-bookworm-all
           - debian-bullseye-all
-          - ubuntu-bionic-all
           - ubuntu-focal-all
           - ubuntu-jammy-all
-          - ubuntu-kinetic-all

         POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
@@ -157,7 +157,6 @@ jobs:

           apt-get update -y
           ## Install required packages to execute packaging tools for deb based distros
-          apt install python3-dev python3-pip -y
-          sudo apt-get purge -y python3-yaml
-          python3 -m pip install --upgrade pip setuptools==57.5.0
+          apt-get install python3-dev python3-pip -y
+          apt-get purge -y python3-yaml
           ./.github/packaging/validate_build_output.sh "deb"
CHANGELOG.md
@@ -1,3 +1,50 @@
+### citus v11.2.2 (February 12, 2024) ###
+
+* Fixes a bug in background shard rebalancer where the replicate
+  reference tables task fails if the current user is not a superuser (#6930)
+
+* Fixes a bug related to non-existent objects in DDL commands (#6984)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes incorrect results on fetching scrollable with hold cursors (#7014)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves failure handling of distributed execution (#7090)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation (#7135)
+
+### citus v11.2.1 (April 20, 2023) ###
+
+* Correctly reports shard size in `citus_shards` view (#6748)
+
+* Fixes a bug in shard copy operations (#6721)
+
+* Fixes a bug with `INSERT .. SELECT` queries with identity columns (#6802)
+
+* Fixes an uninitialized memory access in shard split API (#6845)
+
+* Fixes compilation for PG13.10 and PG14.7 (#6711)
+
+* Fixes memory leak in `alter_distributed_table` (#6726)
+
+* Fixes memory leak issue with query results that returns single row (#6724)
+
+* Prevents using `alter_distributed_table` and `undistribute_table` UDFs when a
+  table has identity columns (#6738)
+
+* Prevents using identity columns on data types other than `bigint` on
+  distributed tables (#6738)
+
 ### citus v11.2.0 (January 30, 2023) ###

 * Adds support for outer joins with reference tables / complex subquery-CTEs
Makefile
@@ -11,7 +11,7 @@ endif
 include Makefile.global

-all: extension pg_send_cancellation
+all: extension

 # build columnar only
@@ -40,22 +40,14 @@ clean-full:

 install-downgrades:
 	$(MAKE) -C src/backend/distributed/ install-downgrades
-install-all: install-headers install-pg_send_cancellation
+install-all: install-headers
 	$(MAKE) -C src/backend/columnar/ install-all
 	$(MAKE) -C src/backend/distributed/ install-all

-# build citus_send_cancellation binary
-pg_send_cancellation:
-	$(MAKE) -C src/bin/pg_send_cancellation/ all
-install-pg_send_cancellation: pg_send_cancellation
-	$(MAKE) -C src/bin/pg_send_cancellation/ install
-clean-pg_send_cancellation:
-	$(MAKE) -C src/bin/pg_send_cancellation/ clean
-.PHONY: pg_send_cancellation install-pg_send_cancellation clean-pg_send_cancellation

 # Add to generic targets
-install: install-extension install-headers install-pg_send_cancellation
-clean: clean-extension clean-pg_send_cancellation
+install: install-extension install-headers
+clean: clean-extension

 # apply or check style
 reindent:
@@ -15,9 +15,6 @@ PG_MAJOR=${PG_MAJOR:?please provide the postgres major version}
 codename=${VERSION#*(}
 codename=${codename%)*}

-# get project from argument
-project="${CIRCLE_PROJECT_REPONAME}"
-
 # we'll do everything with absolute paths
 basedir="$(pwd)"

@@ -28,7 +25,7 @@ build_ext() {
   pg_major="$1"

   builddir="${basedir}/build-${pg_major}"
-  echo "Beginning build of ${project} for PostgreSQL ${pg_major}..." >&2
+  echo "Beginning build for PostgreSQL ${pg_major}..." >&2

   # do everything in a subdirectory to avoid clutter in current directory
   mkdir -p "${builddir}" && cd "${builddir}"
@@ -14,8 +14,8 @@ ci_scripts=$(
   grep -v -E '^(ci_helpers.sh|fix_style.sh)$'
 )
 for script in $ci_scripts; do
-  if ! grep "\\bci/$script\\b" .circleci/config.yml > /dev/null; then
-    echo "ERROR: CI script with name \"$script\" is not actually used in .circleci/config.yml"
+  if ! grep "\\bci/$script\\b" -r .github > /dev/null; then
+    echo "ERROR: CI script with name \"$script\" is not actually used in .github folder"
     exit 1
   fi
   if ! grep "^## \`$script\`\$" ci/README.md > /dev/null; then
@@ -1,96 +0,0 @@
#!/bin/bash

# Testing this script locally requires you to set the following environment
# variables:
# CIRCLE_BRANCH, GIT_USERNAME and GIT_TOKEN

# fail if trying to reference a variable that is not set.
set -u
# exit immediately if a command fails
set -e
# Fail on pipe failures
set -o pipefail

PR_BRANCH="${CIRCLE_BRANCH}"
ENTERPRISE_REMOTE="https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/citusdata/citus-enterprise"

# shellcheck disable=SC1091
source ci/ci_helpers.sh

# List executed commands. This is done so debugging this script is easier when
# it fails. It's explicitly done after git remote add so username and password
# are not shown in CI output (even though it's also filtered out by CircleCI)
set -x

check_compile () {
	echo "INFO: checking if merged code can be compiled"
	./configure --without-libcurl
	make -j10
}

# Clone current git repo (which should be community) to a temporary working
# directory and go there
GIT_DIR_ROOT="$(git rev-parse --show-toplevel)"
TMP_GIT_DIR="$(mktemp --directory -t citus-merge-check.XXXXXXXXX)"
git clone "$GIT_DIR_ROOT" "$TMP_GIT_DIR"
cd "$TMP_GIT_DIR"

# Fails in CI without this
git config user.email "citus-bot@microsoft.com"
git config user.name "citus bot"

# Disable "set -x" temporarily, because $ENTERPRISE_REMOTE contains passwords
{ set +x ; } 2> /dev/null
git remote add enterprise "$ENTERPRISE_REMOTE"
set -x

git remote set-url --push enterprise no-pushing

# Fetch enterprise-master
git fetch enterprise enterprise-master

git checkout "enterprise/enterprise-master"

if git merge --no-commit "origin/$PR_BRANCH"; then
	echo "INFO: community PR branch could be merged into enterprise-master"
	# check that we can compile after the merge
	if check_compile; then
		exit 0
	fi

	echo "WARN: Failed to compile after community PR branch was merged into enterprise"
fi

# undo partial merge
git merge --abort

# If we have a conflict on enterprise merge on the master branch, we have a problem.
# Provide an error message to indicate that enterprise merge is needed to fix this check.
if [[ $PR_BRANCH = master ]]; then
	echo "ERROR: Master branch has merge conflicts with enterprise-master."
	echo "Try re-running this CI job after merging your changes into enterprise-master."
	exit 1
fi

if ! git fetch enterprise "$PR_BRANCH" ; then
	echo "ERROR: enterprise/$PR_BRANCH was not found and community PR branch could not be merged into enterprise-master"
	exit 1
fi

# Show the top commit of the enterprise PR branch to make debugging easier
git log -n 1 "enterprise/$PR_BRANCH"

# Check that this branch contains the top commit of the current community PR
# branch. If it does not it means it's not up to date with the current PR, so
# the enterprise branch should be updated.
if ! git merge-base --is-ancestor "origin/$PR_BRANCH" "enterprise/$PR_BRANCH" ; then
	echo "ERROR: enterprise/$PR_BRANCH is not up to date with community PR branch"
	exit 1
fi

# Now check if we can merge the enterprise PR into enterprise-master without
# issues.
git merge --no-commit "enterprise/$PR_BRANCH"
# check that we can compile after the merge
check_compile
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 11.2devel.
+# Generated by GNU Autoconf 2.69 for Citus 11.2.2.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='Citus'
 PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='11.2devel'
-PACKAGE_STRING='Citus 11.2devel'
+PACKAGE_VERSION='11.2.2'
+PACKAGE_STRING='Citus 11.2.2'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''

@@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
 # Omit some internal or obsolete options to make the list less imposing.
 # This message is too long to be a string in the A/UX 3.1 sh.
 cat <<_ACEOF
-\`configure' configures Citus 11.2devel to adapt to many kinds of systems.
+\`configure' configures Citus 11.2.2 to adapt to many kinds of systems.

 Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1324,7 +1324,7 @@ fi

 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Citus 11.2devel:";;
+     short | recursive ) echo "Configuration of Citus 11.2.2:";;
   esac
 cat <<\_ACEOF

@@ -1429,7 +1429,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
 cat <<\_ACEOF
-Citus configure 11.2devel
+Citus configure 11.2.2
 generated by GNU Autoconf 2.69

 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.

-It was created by Citus $as_me 11.2devel, which was
+It was created by Citus $as_me 11.2.2, which was
 generated by GNU Autoconf 2.69. Invocation command line was

   $ $0 $@

@@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Citus $as_me 11.2devel, which was
+This file was extended by Citus $as_me 11.2.2, which was
 generated by GNU Autoconf 2.69. Invocation command line was

   CONFIG_FILES = $CONFIG_FILES

@@ -5455,7 +5455,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-Citus config.status 11.2devel
+Citus config.status 11.2.2
 configured by $0, generated by GNU Autoconf 2.69,
 with options \\"\$ac_cs_config\\"
@@ -5,7 +5,7 @@
 # everyone needing autoconf installed, the resulting files are checked
 # into the SCM.

-AC_INIT([Citus], [11.2devel])
+AC_INIT([Citus], [11.2.2])
 AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])

 # we'll need sed and awk for some of the version commands
@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '11.2-1'
+default_version = '11.2-2'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog
@@ -183,6 +183,7 @@ static TableConversionReturn * AlterDistributedTable(TableConversionParameters *
 static TableConversionReturn * AlterTableSetAccessMethod(
     TableConversionParameters *params);
 static TableConversionReturn * ConvertTable(TableConversionState *con);
+static TableConversionReturn * ConvertTableInternal(TableConversionState *con);
 static bool SwitchToSequentialAndLocalExecutionIfShardNameTooLong(char *relationName,
                                                                   char *longestShardName);
 static void DropIndexesNotSupportedByColumnar(Oid relationId,
@@ -216,6 +217,8 @@ static bool WillRecreateForeignKeyToReferenceTable(Oid relationId,
 static void WarningsForDroppingForeignKeysWithDistributedTables(Oid relationId);
 static void ErrorIfUnsupportedCascadeObjects(Oid relationId);
 static bool DoesCascadeDropUnsupportedObject(Oid classId, Oid id, HTAB *nodeMap);
+static TableConversionReturn * CopyTableConversionReturnIntoCurrentContext(
+    TableConversionReturn *tableConversionReturn);

 PG_FUNCTION_INFO_V1(undistribute_table);
 PG_FUNCTION_INFO_V1(alter_distributed_table);
@@ -402,6 +405,7 @@ UndistributeTable(TableConversionParameters *params)
     params->conversionType = UNDISTRIBUTE_TABLE;
     params->shardCountIsNull = true;
     TableConversionState *con = CreateTableConversion(params);

     return ConvertTable(con);
 }

@@ -441,6 +445,7 @@ AlterDistributedTable(TableConversionParameters *params)
         ereport(DEBUG1, (errmsg("setting multi shard modify mode to sequential")));
         SetLocalMultiShardModifyModeToSequential();
     }

     return ConvertTable(con);
 }

@@ -511,9 +516,9 @@ AlterTableSetAccessMethod(TableConversionParameters *params)


 /*
- * ConvertTable is used for converting a table into a new table with different properties.
- * The conversion is done by creating a new table, moving everything to the new table and
- * dropping the old one. So the oid of the table is not preserved.
+ * ConvertTableInternal is used for converting a table into a new table with different
+ * properties. The conversion is done by creating a new table, moving everything to the
+ * new table and dropping the old one. So the oid of the table is not preserved.
  *
  * The new table will have the same name, columns and rows. It will also have partitions,
  * views, sequences of the old table. Finally it will have everything created by
@@ -532,7 +537,7 @@ AlterTableSetAccessMethod(TableConversionParameters *params)
  * in case you add a new way to return from this function.
  */
 TableConversionReturn *
-ConvertTable(TableConversionState *con)
+ConvertTableInternal(TableConversionState *con)
 {
     InTableTypeConversionFunctionCall = true;

@@ -869,10 +874,77 @@ ConvertTable(TableConversionState *con)
     SetLocalEnableLocalReferenceForeignKeys(oldEnableLocalReferenceForeignKeys);

     InTableTypeConversionFunctionCall = false;

     return ret;
 }


+/*
+ * CopyTableConversionReturnIntoCurrentContext copies given tableConversionReturn
+ * into CurrentMemoryContext.
+ */
+static TableConversionReturn *
+CopyTableConversionReturnIntoCurrentContext(TableConversionReturn *tableConversionReturn)
+{
+    TableConversionReturn *tableConversionReturnCopy = NULL;
+    if (tableConversionReturn)
+    {
+        tableConversionReturnCopy = palloc0(sizeof(TableConversionReturn));
+        List *copyForeignKeyCommands = NIL;
+        char *foreignKeyCommand = NULL;
+        foreach_ptr(foreignKeyCommand, tableConversionReturn->foreignKeyCommands)
+        {
+            char *copyForeignKeyCommand = MemoryContextStrdup(CurrentMemoryContext,
+                                                              foreignKeyCommand);
+            copyForeignKeyCommands = lappend(copyForeignKeyCommands,
+                                             copyForeignKeyCommand);
+        }
+        tableConversionReturnCopy->foreignKeyCommands = copyForeignKeyCommands;
+    }
+
+    return tableConversionReturnCopy;
+}
+
+
+/*
+ * ConvertTable is a wrapper for ConvertTableInternal to persist only
+ * TableConversionReturn and delete all other allocations.
+ */
+static TableConversionReturn *
+ConvertTable(TableConversionState *con)
+{
+    /*
+     * We do not allow alter_distributed_table and undistribute_table operations
+     * for tables with identity columns. This is because we do not have a proper way
+     * of keeping sequence states consistent across the cluster.
+     */
+    ErrorIfTableHasIdentityColumn(con->relationId);
+
+    /*
+     * when there are many partitions or colocated tables, memory usage is
+     * accumulated. Free context for each call to ConvertTable.
+     */
+    MemoryContext convertTableContext =
+        AllocSetContextCreate(CurrentMemoryContext,
+                              "citus_convert_table_context",
+                              ALLOCSET_DEFAULT_SIZES);
+    MemoryContext oldContext = MemoryContextSwitchTo(convertTableContext);
+
+    TableConversionReturn *tableConversionReturn = ConvertTableInternal(con);
+
+    MemoryContextSwitchTo(oldContext);
+
+    /* persist TableConversionReturn in oldContext */
+    TableConversionReturn *tableConversionReturnCopy =
+        CopyTableConversionReturnIntoCurrentContext(tableConversionReturn);
+
+    /* delete convertTableContext */
+    MemoryContextDelete(convertTableContext);
+
+    return tableConversionReturnCopy;
+}
+
+
 /*
  * DropIndexesNotSupportedByColumnar is a helper function used during accces
  * method conversion to drop the indexes that are not supported by columnarAM.
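A side note on the pattern the new ConvertTable wrapper uses: run the allocation-heavy work inside a short-lived child memory context, copy only the result back into the caller's context, and free everything else with a single MemoryContextDelete. A minimal sketch of that shape (not Citus code; compute_label_in_temp_context is a made-up stand-in for what ConvertTableInternal does):

#include "postgres.h"
#include "utils/memutils.h"

static char *
compute_label_in_temp_context(void)
{
    /* child context: everything palloc'd under it dies with it */
    MemoryContext tempContext =
        AllocSetContextCreate(CurrentMemoryContext,
                              "temp work context",
                              ALLOCSET_DEFAULT_SIZES);
    MemoryContext oldContext = MemoryContextSwitchTo(tempContext);

    /* allocation-heavy work; both strings live in tempContext */
    char *scratch = psprintf("scratch-%d", 42);
    char *result = psprintf("result-%s", scratch);

    MemoryContextSwitchTo(oldContext);

    /* persist only the final result in the caller's context */
    char *resultCopy = MemoryContextStrdup(CurrentMemoryContext, result);

    /* one call frees scratch, result, and any other leftovers */
    MemoryContextDelete(tempContext);

    return resultCopy;
}

The design choice mirrors the commit: instead of hunting down every leaked allocation across many partitions and colocated tables, the wrapper bounds the whole call's memory in one context and pays a small copy for the surviving return value.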
@@ -1523,96 +1595,6 @@ CreateMaterializedViewDDLCommand(Oid matViewOid)
 }


-/*
- * This function marks all the identity sequences as distributed on the given table.
- */
-static void
-MarkIdentitiesAsDistributed(Oid targetRelationId)
-{
-    Relation relation = relation_open(targetRelationId, AccessShareLock);
-    TupleDesc tupleDescriptor = RelationGetDescr(relation);
-    relation_close(relation, NoLock);
-
-    bool missingSequenceOk = false;
-
-    for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts;
-         attributeIndex++)
-    {
-        Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
-
-        if (attributeForm->attidentity)
-        {
-            Oid seqOid = getIdentitySequence(targetRelationId, attributeForm->attnum,
-                                             missingSequenceOk);
-
-            ObjectAddress seqAddress = { 0 };
-            ObjectAddressSet(seqAddress, RelationRelationId, seqOid);
-            MarkObjectDistributed(&seqAddress);
-        }
-    }
-}
-
-
-/*
- * This function returns sql statements to rename identites on the given table
- */
-static void
-PrepareRenameIdentitiesCommands(Oid sourceRelationId, Oid targetRelationId,
-                                List **outCoordinatorCommands, List **outWorkerCommands)
-{
-    Relation targetRelation = relation_open(targetRelationId, AccessShareLock);
-    TupleDesc targetTupleDescriptor = RelationGetDescr(targetRelation);
-    relation_close(targetRelation, NoLock);
-
-    bool missingSequenceOk = false;
-
-    for (int attributeIndex = 0; attributeIndex < targetTupleDescriptor->natts;
-         attributeIndex++)
-    {
-        Form_pg_attribute attributeForm = TupleDescAttr(targetTupleDescriptor,
-                                                        attributeIndex);
-
-        if (attributeForm->attidentity)
-        {
-            char *columnName = NameStr(attributeForm->attname);
-
-            Oid targetSequenceOid = getIdentitySequence(targetRelationId,
-                                                        attributeForm->attnum,
-                                                        missingSequenceOk);
-            char *targetSequenceName = generate_relation_name(targetSequenceOid, NIL);
-
-            Oid sourceSequenceOid = getIdentitySequence(sourceRelationId,
-                                                        attributeForm->attnum,
-                                                        missingSequenceOk);
-            char *sourceSequenceName = generate_relation_name(sourceSequenceOid, NIL);
-
-            /* to rename sequence on the coordinator */
-            *outCoordinatorCommands = lappend(*outCoordinatorCommands, psprintf(
-                "SET citus.enable_ddl_propagation TO OFF; ALTER SEQUENCE %s RENAME TO %s; RESET citus.enable_ddl_propagation;",
-                quote_identifier(targetSequenceName),
-                quote_identifier(sourceSequenceName)));
-
-            /* update workers to use existing sequence and drop the new one generated by PG */
-            bool missingTableOk = true;
-            *outWorkerCommands = lappend(*outWorkerCommands,
-                                         GetAlterColumnWithNextvalDefaultCmd(
-                                             sourceSequenceOid, sourceRelationId,
-                                             columnName, missingTableOk));
-
-            /* drop the sequence generated by identity column */
-            *outWorkerCommands = lappend(*outWorkerCommands, psprintf(
-                "DROP SEQUENCE IF EXISTS %s",
-                quote_identifier(targetSequenceName)));
-        }
-    }
-}
-
-
 /*
  * ReplaceTable replaces the source table with the target table.
  * It moves all the rows of the source table to target table with INSERT SELECT.
@@ -1671,24 +1653,6 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
         ExecuteQueryViaSPI(query->data, SPI_OK_INSERT);
     }

-    /*
-     * Drop identity dependencies (sequences marked as DEPENDENCY_INTERNAL) on the workers
-     * to keep their states after the source table is dropped.
-     */
-    List *ownedIdentitySequences = getOwnedSequences_internal(sourceId, 0,
-                                                              DEPENDENCY_INTERNAL);
-    if (ownedIdentitySequences != NIL && ShouldSyncTableMetadata(sourceId))
-    {
-        char *qualifiedTableName = quote_qualified_identifier(schemaName, sourceName);
-        StringInfo command = makeStringInfo();
-
-        appendStringInfo(command,
-                         "SELECT pg_catalog.worker_drop_sequence_dependency(%s);",
-                         quote_literal_cstr(qualifiedTableName));
-
-        SendCommandToWorkersWithMetadata(command->data);
-    }
-
     /*
      * Modify regular sequence dependencies (sequences marked as DEPENDENCY_AUTO)
      */
@@ -1748,23 +1712,6 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
                             quote_qualified_identifier(schemaName, sourceName))));
     }

-    /*
-     * We need to prepare rename identities commands before dropping the original table,
-     * otherwise we can't find the original names of the identity sequences.
-     * We prepare separate commands for the coordinator and the workers because:
-     * In the coordinator, we simply need to rename the identity sequences
-     * to their names on the old table, because right now the identity
-     * sequences have default names generated by Postgres with the creation of the new table
-     * In the workers, we have not dropped the original identity sequences,
-     * so what we do is we alter the columns and set their default to the
-     * original identity sequences, and after that we drop the new sequences.
-     */
-    List *coordinatorCommandsToRenameIdentites = NIL;
-    List *workerCommandsToRenameIdentites = NIL;
-    PrepareRenameIdentitiesCommands(sourceId, targetId,
-                                    &coordinatorCommandsToRenameIdentites,
-                                    &workerCommandsToRenameIdentites);
-
     resetStringInfo(query);
     appendStringInfo(query, "DROP %sTABLE %s CASCADE",
                      IsForeignTable(sourceId) ? "FOREIGN " : "",
@@ -1782,27 +1729,6 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
                      quote_qualified_identifier(schemaName, targetName),
                      quote_identifier(sourceName));
     ExecuteQueryViaSPI(query->data, SPI_OK_UTILITY);
-
-    char *coordinatorCommand = NULL;
-    foreach_ptr(coordinatorCommand, coordinatorCommandsToRenameIdentites)
-    {
-        ExecuteQueryViaSPI(coordinatorCommand, SPI_OK_UTILITY);
-    }
-
-    char *workerCommand = NULL;
-    foreach_ptr(workerCommand, workerCommandsToRenameIdentites)
-    {
-        SendCommandToWorkersWithMetadata(workerCommand);
-    }
-
-    /*
-     * To preserve identity sequences states in case of redistributing the table again,
-     * we don't drop them when we undistribute a table. To maintain consistency and
-     * avoid future problems if we redistribute the table, we want to apply all changes happening to
-     * the identity sequence in the coordinator to their corresponding sequences in the workers as well.
-     * That's why we have to mark identity sequences as distributed
-     */
-    MarkIdentitiesAsDistributed(targetId);
 }
@@ -1131,7 +1131,7 @@ DropIdentitiesOnTable(Oid relationId)
 {
     Relation relation = relation_open(relationId, AccessShareLock);
     TupleDesc tupleDescriptor = RelationGetDescr(relation);
-    relation_close(relation, NoLock);
+    List *dropCommandList = NIL;

     for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts;
          attributeIndex++)
@@ -1151,15 +1151,23 @@ DropIdentitiesOnTable(Oid relationId)
                              qualifiedTableName,
                              columnName);

-            /*
-             * We need to disable/enable ddl propagation for this command, to prevent
-             * sending unnecessary ALTER COLUMN commands for partitions, to MX workers.
-             */
-            ExecuteAndLogUtilityCommandList(list_make3(DISABLE_DDL_PROPAGATION,
-                                                       dropCommand->data,
-                                                       ENABLE_DDL_PROPAGATION));
+            dropCommandList = lappend(dropCommandList, dropCommand->data);
         }
     }

+    relation_close(relation, NoLock);
+
+    char *dropCommand = NULL;
+    foreach_ptr(dropCommand, dropCommandList)
+    {
+        /*
+         * We need to disable/enable ddl propagation for this command, to prevent
+         * sending unnecessary ALTER COLUMN commands for partitions, to MX workers.
+         */
+        ExecuteAndLogUtilityCommandList(list_make3(DISABLE_DDL_PROPAGATION,
+                                                   dropCommand,
+                                                   ENABLE_DDL_PROPAGATION));
+    }
 }
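The reordering above is a collect-then-execute pattern: the DROP IDENTITY commands are gathered while the relation (and the tuple descriptor borrowed from it) is still open, and only executed after relation_close(), instead of firing DDL in the middle of the attribute scan. A rough sketch of that shape, with elog() standing in for the real command executor:

#include "postgres.h"
#include "access/relation.h"
#include "nodes/pg_list.h"
#include "utils/rel.h"

static void
collect_then_execute(Oid relationId)
{
    Relation relation = relation_open(relationId, AccessShareLock);
    TupleDesc tupleDescriptor = RelationGetDescr(relation);
    List *commandList = NIL;

    for (int i = 0; i < tupleDescriptor->natts; i++)
    {
        Form_pg_attribute att = TupleDescAttr(tupleDescriptor, i);
        if (att->attidentity)
        {
            /* collect only; no DDL while the descriptor is in use */
            commandList = lappend(commandList,
                                  psprintf("would drop identity on column %s",
                                           NameStr(att->attname)));
        }
    }

    relation_close(relation, NoLock);

    ListCell *cell = NULL;
    foreach(cell, commandList)
    {
        /* the real code executes the collected command here */
        elog(NOTICE, "%s", (char *) lfirst(cell));
    }
}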
@@ -388,6 +388,19 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName,
     if (!IsColocateWithDefault(colocateWithTableName) && !IsColocateWithNone(
             colocateWithTableName))
     {
+        if (replicationModel != REPLICATION_MODEL_STREAMING)
+        {
+            ereport(ERROR, (errmsg("cannot create distributed table "
+                                   "concurrently because Citus allows "
+                                   "concurrent table distribution only when "
+                                   "citus.shard_replication_factor = 1"),
+                            errhint("table %s is requested to be colocated "
+                                    "with %s which has "
+                                    "citus.shard_replication_factor > 1",
+                                    get_rel_name(relationId),
+                                    colocateWithTableName)));
+        }
+
         EnsureColocateWithTableIsValid(relationId, distributionMethod,
                                        distributionColumnName,
                                        colocateWithTableName);
@@ -1190,7 +1203,7 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId
     foreach_oid(citusTableId, citusTableIdList)
     {
         List *seqInfoList = NIL;
-        GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0);
+        GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0, DEPENDENCY_AUTO);

         SequenceInfo *seqInfo = NULL;
         foreach_ptr(seqInfo, seqInfoList)
@@ -1267,7 +1280,7 @@ EnsureRelationHasCompatibleSequenceTypes(Oid relationId)
 {
     List *seqInfoList = NIL;

-    GetDependentSequencesWithRelation(relationId, &seqInfoList, 0);
+    GetDependentSequencesWithRelation(relationId, &seqInfoList, 0, DEPENDENCY_AUTO);
     EnsureDistributedSequencesHaveOneType(relationId, seqInfoList);
 }

@@ -1608,6 +1621,8 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
 {
     Oid parentRelationId = InvalidOid;

+    ErrorIfTableHasUnsupportedIdentityColumn(relationId);
+
     EnsureLocalTableEmptyIfNecessary(relationId, distributionMethod);

     /* user really wants triggers? */
@@ -370,7 +370,7 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
             bool creatingShellTableOnRemoteNode = true;
             List *tableDDLCommands = GetFullTableCreationCommands(relationId,
                                                                   WORKER_NEXTVAL_SEQUENCE_DEFAULTS,
-                                                                  INCLUDE_IDENTITY_AS_SEQUENCE_DEFAULTS,
+                                                                  INCLUDE_IDENTITY,
                                                                   creatingShellTableOnRemoteNode);
             TableDDLCommand *tableDDLCommand = NULL;
             foreach_ptr(tableDDLCommand, tableDDLCommands)
@@ -33,7 +33,8 @@

 /* Local functions forward declarations for helper functions */
 static bool OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId);
-static Oid SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress);
+static Oid SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress, char
+                                          depType);
 static List * FilterDistributedSequences(GrantStmt *stmt);


@@ -183,7 +184,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,

         char *columnName = NameStr(attributeForm->attname);
         List *columnOwnedSequences =
-            getOwnedSequences_internal(relationId, attributeIndex + 1, 0);
+            getOwnedSequences_internal(relationId, attributeIndex + 1, DEPENDENCY_AUTO);

         if (attributeForm->atthasdef && list_length(columnOwnedSequences) == 0)
         {
@@ -453,21 +454,22 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString,
     /* the code-path only supports a single object */
     Assert(list_length(addresses) == 1);

+    /* We have already asserted that we have exactly 1 address in the addresses. */
+    ObjectAddress *address = linitial(addresses);
+
     /* error out if the sequence is distributed */
-    if (IsAnyObjectDistributed(addresses))
+    if (IsAnyObjectDistributed(addresses) || SequenceUsedInDistributedTable(address,
+                                                                            DEPENDENCY_INTERNAL))
     {
         ereport(ERROR, (errmsg(
                             "Altering a distributed sequence is currently not supported.")));
     }

-    /* We have already asserted that we have exactly 1 address in the addresses. */
-    ObjectAddress *address = linitial(addresses);
-
     /*
      * error out if the sequence is used in a distributed table
      * and this is an ALTER SEQUENCE .. AS .. statement
      */
-    Oid citusTableId = SequenceUsedInDistributedTable(address);
+    Oid citusTableId = SequenceUsedInDistributedTable(address, DEPENDENCY_AUTO);
     if (citusTableId != InvalidOid)
     {
         List *options = stmt->options;
@@ -497,16 +499,19 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString,
  * SequenceUsedInDistributedTable returns true if the argument sequence
  * is used as the default value of a column in a distributed table.
  * Returns false otherwise
+ * See DependencyType for the possible values of depType.
+ * We use DEPENDENCY_INTERNAL for sequences created by identity column.
+ * DEPENDENCY_AUTO for regular sequences.
  */
 static Oid
-SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress)
+SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress, char depType)
 {
     List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE);
     Oid citusTableId = InvalidOid;
     foreach_oid(citusTableId, citusTableIdList)
     {
         List *seqInfoList = NIL;
-        GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0);
+        GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0, depType);
         SequenceInfo *seqInfo = NULL;
         foreach_ptr(seqInfo, seqInfoList)
         {
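The new depType argument threads a real catalog distinction through these lookups: in pg_depend, a sequence owned by a serial/OWNED BY column is linked with DEPENDENCY_AUTO ('a'), while a sequence backing a GENERATED ... AS IDENTITY column is linked with DEPENDENCY_INTERNAL ('i'). That is why identity sequences were invisible to the old AUTO-only scans. A small illustration, assuming the PG 13-15 signature of getIdentitySequence(Oid, AttrNumber, bool):

#include "postgres.h"
#include "access/attnum.h"
#include "catalog/dependency.h"

static void
report_sequence_deptype(Oid relationId, AttrNumber attnum)
{
    bool missingOk = true;
    Oid identitySeqId = getIdentitySequence(relationId, attnum, missingOk);

    if (OidIsValid(identitySeqId))
    {
        /* identity sequences hang off the column with deptype 'i' */
        elog(NOTICE, "sequence %u is DEPENDENCY_INTERNAL ('%c')",
             identitySeqId, DEPENDENCY_INTERNAL);
    }
    else
    {
        /* a serial column's sequence is recorded with 'a' instead */
        elog(NOTICE, "no identity sequence; serial uses DEPENDENCY_AUTO ('%c')",
             DEPENDENCY_AUTO);
    }
}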
@@ -1378,29 +1378,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
                 }
             }

-            /*
-             * We check for ADD COLUMN .. GENERATED .. AS IDENTITY expr
-             * since it uses a sequence as an internal dependency
-             * we should deparse the statement
-             */
-            constraint = NULL;
-            foreach_ptr(constraint, columnConstraints)
-            {
-                if (constraint->contype == CONSTR_IDENTITY)
-                {
-                    deparseAT = true;
-                    useInitialDDLCommandString = false;
-
-                    /*
-                     * Since we don't support constraints for AT_AddColumn
-                     * we have to set is_not_null to true explicitly for identity columns
-                     */
-                    ColumnDef *newColDef = copyObject(columnDefinition);
-                    newColDef->constraints = NULL;
-                    newColDef->is_not_null = true;
-                    newCmd->def = (Node *) newColDef;
-                }
-            }
-
             /*
              * We check for ADD COLUMN .. SERIAL pseudo-type
@@ -2539,34 +2516,6 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
                 }
             }
         }
-
-        /*
-         * We check for ADD COLUMN .. GENERATED AS IDENTITY expr
-         * since it uses a seqeunce as an internal dependency
-         */
-        constraint = NULL;
-        foreach_ptr(constraint, columnConstraints)
-        {
-            if (constraint->contype == CONSTR_IDENTITY)
-            {
-                AttrNumber attnum = get_attnum(relationId,
-                                               columnDefinition->colname);
-                bool missing_ok = false;
-                Oid seqOid = getIdentitySequence(relationId, attnum, missing_ok);
-
-                if (ShouldSyncTableMetadata(relationId))
-                {
-                    needMetadataSyncForNewSequences = true;
-                    alterTableDefaultNextvalCmd =
-                        GetAddColumnWithNextvalDefaultCmd(seqOid,
-                                                          relationId,
-                                                          columnDefinition->colname,
-                                                          columnDefinition->typeName);
-                }
-            }
-        }
     }
     /*
      * We check for ALTER COLUMN .. SET DEFAULT nextval('user_defined_seq')
@@ -3222,6 +3171,17 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
                     {
                         if (columnConstraint->contype == CONSTR_IDENTITY)
                         {
+                            /*
+                             * We currently don't support adding an identity column for an MX table
+                             */
+                            if (ShouldSyncTableMetadata(relationId))
+                            {
+                                ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                                errmsg(
+                                                    "cannot execute ADD COLUMN commands involving identity"
+                                                    " columns when metadata is synchronized to workers")));
+                            }
+
                             /*
                              * Currently we don't support backfilling the new identity column with default values
                              * if the table is not empty
@@ -3352,7 +3312,8 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
                  */
                 AttrNumber attnum = get_attnum(relationId, command->name);
                 List *seqInfoList = NIL;
-                GetDependentSequencesWithRelation(relationId, &seqInfoList, attnum);
+                GetDependentSequencesWithRelation(relationId, &seqInfoList, attnum,
+                                                  DEPENDENCY_AUTO);
                 if (seqInfoList != NIL)
                 {
                     ereport(ERROR, (errmsg("cannot execute ALTER COLUMN TYPE .. command "
|
@@ -4011,3 +3972,59 @@ MakeNameListFromRangeVar(const RangeVar *rel)
 		return list_make1(makeString(rel->relname));
 	}
 }
+
+
+/*
+ * ErrorIfTableHasUnsupportedIdentityColumn errors out if the given table has
+ * any identity column other than a bigint identity column.
+ */
+void
+ErrorIfTableHasUnsupportedIdentityColumn(Oid relationId)
+{
+	Relation relation = relation_open(relationId, AccessShareLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(relation);
+
+	for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts;
+		 attributeIndex++)
+	{
+		Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
+
+		if (attributeForm->attidentity && attributeForm->atttypid != INT8OID)
+		{
+			char *qualifiedRelationName = generate_qualified_relation_name(relationId);
+			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+							errmsg(
+								"cannot complete operation on %s with smallint/int identity column",
+								qualifiedRelationName),
+							errhint("Use bigint identity column instead.")));
+		}
+	}
+
+	relation_close(relation, NoLock);
+}
+
+
+/*
+ * ErrorIfTableHasIdentityColumn errors out if the given table has an
+ * identity column.
+ */
+void
+ErrorIfTableHasIdentityColumn(Oid relationId)
+{
+	Relation relation = relation_open(relationId, AccessShareLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(relation);
+
+	for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts;
+		 attributeIndex++)
+	{
+		Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
+
+		if (attributeForm->attidentity)
+		{
+			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+							errmsg(
+								"cannot complete operation on a table with identity column")));
+		}
+	}
+
+	relation_close(relation, NoLock);
+}
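A hedged SQL sketch of what these checks enforce; the table names are illustrative, and it assumes the distribution path calls ErrorIfTableHasUnsupportedIdentityColumn() on its input:

```sql
-- An int identity column is rejected with a hint to use bigint; a bigint
-- identity column is accepted.
CREATE TABLE items_bad  (id int    GENERATED ALWAYS AS IDENTITY, v text);
CREATE TABLE items_good (id bigint GENERATED ALWAYS AS IDENTITY, v text);

SELECT create_distributed_table('items_bad', 'id');   -- ERROR ... use bigint instead
SELECT create_distributed_table('items_good', 'id');  -- OK
```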
@@ -186,7 +186,9 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 		IsA(parsetree, ExecuteStmt) ||
 		IsA(parsetree, PrepareStmt) ||
 		IsA(parsetree, DiscardStmt) ||
-		IsA(parsetree, DeallocateStmt))
+		IsA(parsetree, DeallocateStmt) ||
+		IsA(parsetree, DeclareCursorStmt) ||
+		IsA(parsetree, FetchStmt))
 	{
 		/*
 		 * Skip additional checks for common commands that do not have any
@@ -675,14 +675,14 @@ PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes)
 	Assert(PQisnonblocking(pgConn));

 	int copyState = PQputCopyData(pgConn, buffer, nbytes);
-	if (copyState == -1)
+	if (copyState <= 0)
 	{
 		return false;
 	}

 	/*
 	 * PQputCopyData may have queued up part of the data even if it managed
-	 * to send some of it succesfully. We provide back pressure by waiting
+	 * to send some of it successfully. We provide back pressure by waiting
 	 * until the socket is writable to prevent the internal libpq buffers
 	 * from growing excessively.
 	 *
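A minimal, self-contained sketch of the back-pressure idea this hunk relies on (not Citus code): in libpq's nonblocking mode, PQputCopyData can return 0 meaning "try again later", so the caller should wait until the socket is writable instead of letting buffers grow.

```c
#include <sys/select.h>
#include <libpq-fe.h>

/* Queue COPY data, waiting for socket writability whenever libpq pushes back. */
static int
put_copy_data_blocking(PGconn *conn, const char *buf, int nbytes)
{
	for (;;)
	{
		int rc = PQputCopyData(conn, buf, nbytes);
		if (rc == 1)
		{
			return 1;           /* queued successfully */
		}
		if (rc == -1)
		{
			return -1;          /* hard error, see PQerrorMessage() */
		}

		/* rc == 0: output buffer is full, wait until the socket is writable */
		fd_set wfds;
		int sock = PQsocket(conn);
		FD_ZERO(&wfds);
		FD_SET(sock, &wfds);
		if (select(sock + 1, NULL, &wfds, NULL, NULL) < 0)
		{
			return -1;
		}
	}
}
```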
@@ -304,10 +304,7 @@ pg_get_sequencedef(Oid sequenceRelationId)
  * When it's WORKER_NEXTVAL_SEQUENCE_DEFAULTS, the function creates the DEFAULT
  * clause using worker_nextval('sequence') and not nextval('sequence')
  * When IncludeIdentities is NO_IDENTITY, the function does not include identity column
- * specifications. When it's INCLUDE_IDENTITY_AS_SEQUENCE_DEFAULTS, the function
- * uses sequences and set them as default values for identity columns by using exactly
- * the same approach with worker_nextval('sequence') & nextval('sequence') logic
- * desribed above. When it's INCLUDE_IDENTITY it creates GENERATED .. AS IDENTIY clauses.
+ * specifications. When it's INCLUDE_IDENTITY it creates GENERATED .. AS IDENTITY clauses.
  */
 char *
 pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
@@ -403,26 +400,9 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
 			Oid seqOid = getIdentitySequence(RelationGetRelid(relation),
 											 attributeForm->attnum, missing_ok);

-			char *sequenceName = generate_qualified_relation_name(seqOid);
-
-			if (includeIdentityDefaults == INCLUDE_IDENTITY_AS_SEQUENCE_DEFAULTS)
-			{
-				if (pg_get_sequencedef(seqOid)->seqtypid != INT8OID)
-				{
-					appendStringInfo(&buffer,
-									 " DEFAULT worker_nextval(%s::regclass)",
-									 quote_literal_cstr(sequenceName));
-				}
-				else
-				{
-					appendStringInfo(&buffer, " DEFAULT nextval(%s::regclass)",
-									 quote_literal_cstr(sequenceName));
-				}
-			}
-			else if (includeIdentityDefaults == INCLUDE_IDENTITY)
+			if (includeIdentityDefaults == INCLUDE_IDENTITY)
 			{
 				Form_pg_sequence pgSequenceForm = pg_get_sequencedef(seqOid);
-				uint64 sequenceStart = nextval_internal(seqOid, false);
 				char *sequenceDef = psprintf(
 					" GENERATED %s AS IDENTITY (INCREMENT BY " INT64_FORMAT \
 					" MINVALUE " INT64_FORMAT " MAXVALUE "
@@ -433,7 +413,8 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
 					"ALWAYS" : "BY DEFAULT",
 					pgSequenceForm->seqincrement,
 					pgSequenceForm->seqmin,
-					pgSequenceForm->seqmax, sequenceStart,
+					pgSequenceForm->seqmax,
+					pgSequenceForm->seqstart,
 					pgSequenceForm->seqcache,
 					pgSequenceForm->seqcycle ? "" : "NO ");
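One way to read this hunk: the deparsed identity clause should use the sequence's declared START value from pg_sequence rather than calling nextval under the hood, which consumes a sequence value as a side effect. A hedged sketch of the clause being rebuilt, with made-up numbers:

```sql
-- pg_get_tableschemadef_string() now emits a specification of this shape,
-- taking START WITH straight from pg_sequence.seqstart:
CREATE TABLE t (
    id bigint GENERATED BY DEFAULT AS IDENTITY
        (INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807
         START WITH 1 CACHE 1 NO CYCLE)
);
```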
@@ -1391,7 +1372,7 @@ convert_aclright_to_string(int aclright)

 /*
  * contain_nextval_expression_walker walks over expression tree and returns
- * true if it contains call to 'nextval' function.
+ * true if it contains a call to the 'nextval' function or an identity column.
  */
 bool
 contain_nextval_expression_walker(Node *node, void *context)
@@ -1401,6 +1382,13 @@ contain_nextval_expression_walker(Node *node, void *context)
 		return false;
 	}

+	/* check if the node contains an identity column */
+	if (IsA(node, NextValueExpr))
+	{
+		return true;
+	}
+
+	/* check if the node contains call to 'nextval' */
 	if (IsA(node, FuncExpr))
 	{
 		FuncExpr *funcExpr = (FuncExpr *) node;
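For readers unfamiliar with the walker idiom used above, here is a generic sketch under the standard PostgreSQL nodeFuncs API (this is not the Citus function itself; the function name is made up). A walker returns true to stop early and otherwise recurses via expression_tree_walker():

```c
#include "postgres.h"
#include "nodes/nodeFuncs.h"
#include "nodes/primnodes.h"

/* Returns true if the expression tree references an identity column. */
static bool
contains_next_value_expr_walker(Node *node, void *context)
{
	if (node == NULL)
	{
		return false;
	}

	/* identity columns show up as NextValueExpr in rewritten targetlists */
	if (IsA(node, NextValueExpr))
	{
		return true;
	}

	return expression_tree_walker(node, contains_next_value_expr_walker, context);
}
```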
@@ -1406,8 +1406,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,

 	/* Assert we processed the right number of columns */
 #ifdef USE_ASSERT_CHECKING
-	while (i < colinfo->num_cols && colinfo->colnames[i] == NULL)
-		i++;
+	for (int col_index = 0; col_index < colinfo->num_cols; col_index++)
+	{
+		/*
+		 * In the above processing-loops, "i" advances only if
+		 * the column is not new, check if this is a new column.
+		 */
+		if (colinfo->is_new_col[col_index])
+			i++;
+	}
 	Assert(i == colinfo->num_cols);
 	Assert(j == nnewcolumns);
 #endif

@@ -1529,8 +1529,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,

 	/* Assert we processed the right number of columns */
 #ifdef USE_ASSERT_CHECKING
-	while (i < colinfo->num_cols && colinfo->colnames[i] == NULL)
-		i++;
+	for (int col_index = 0; col_index < colinfo->num_cols; col_index++)
+	{
+		/*
+		 * In the above processing-loops, "i" advances only if
+		 * the column is not new, check if this is a new column.
+		 */
+		if (colinfo->is_new_col[col_index])
+			i++;
+	}
 	Assert(i == colinfo->num_cols);
 	Assert(j == nnewcolumns);
 #endif

@@ -1565,8 +1565,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,

 	/* Assert we processed the right number of columns */
 #ifdef USE_ASSERT_CHECKING
-	while (i < colinfo->num_cols && colinfo->colnames[i] == NULL)
-		i++;
+	for (int col_index = 0; col_index < colinfo->num_cols; col_index++)
+	{
+		/*
+		 * In the above processing-loops, "i" advances only if
+		 * the column is not new, check if this is a new column.
+		 */
+		if (colinfo->is_new_col[col_index])
+			i++;
+	}
 	Assert(i == colinfo->num_cols);
 	Assert(j == nnewcolumns);
 #endif
@@ -522,7 +522,9 @@ typedef enum TaskExecutionState
 /*
  * PlacementExecutionOrder indicates whether a command should be executed
  * on any replica, on all replicas sequentially (in order), or on all
- * replicas in parallel.
+ * replicas in parallel. In other words, EXECUTION_ORDER_ANY is used for
+ * SELECTs, EXECUTION_ORDER_SEQUENTIAL/EXECUTION_ORDER_PARALLEL is used for
+ * DML/DDL.
  */
 typedef enum PlacementExecutionOrder
 {
@@ -4777,6 +4779,7 @@ ReceiveResults(WorkerSession *session, bool storeRows)
 		TupleDesc tupleDescriptor = tupleDest->tupleDescForQuery(tupleDest, queryIndex);
 		if (tupleDescriptor == NULL)
 		{
+			PQclear(result);
 			continue;
 		}
@@ -5509,6 +5512,10 @@ TaskExecutionStateMachine(ShardCommandExecution *shardCommandExecution)
 	{
 		currentTaskExecutionState = TASK_EXECUTION_FAILED;
 	}
+	else if (executionOrder != EXECUTION_ORDER_ANY && failedPlacementCount > 0)
+	{
+		currentTaskExecutionState = TASK_EXECUTION_FAILED;
+	}
 	else if (executionOrder == EXECUTION_ORDER_ANY && donePlacementCount > 0)
 	{
 		currentTaskExecutionState = TASK_EXECUTION_FINISHED;
@@ -781,7 +781,19 @@ CitusEndScan(CustomScanState *node)
  */
 static void
 CitusReScan(CustomScanState *node)
-{ }
+{
+	if (node->ss.ps.ps_ResultTupleSlot)
+	{
+		ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
+	}
+	ExecScanReScan(&node->ss);
+
+	CitusScanState *scanState = (CitusScanState *) node;
+	if (scanState->tuplestorestate)
+	{
+		tuplestore_rescan(scanState->tuplestorestate);
+	}
+}


 /*
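A hedged SQL sketch of a workload that exercises the rescan path above, tying in with the DeclareCursorStmt/FetchStmt handling added earlier in this compare (dist_table is a made-up distributed table):

```sql
BEGIN;
DECLARE dist_cursor SCROLL CURSOR FOR SELECT * FROM dist_table ORDER BY 1;
FETCH 10 FROM dist_cursor;
MOVE ABSOLUTE 0 IN dist_cursor;   -- rewinds, which triggers CitusReScan()
FETCH 10 FROM dist_cursor;        -- must replay from the buffered tuplestore
COMMIT;
```

With the old empty body, the tuplestore stayed at EOF after a rewind; the new body clears the result slot and rescans the tuplestore so the replay starts from the first row again.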
@@ -1834,7 +1834,7 @@ static List *
 GetRelationSequenceDependencyList(Oid relationId)
 {
 	List *seqInfoList = NIL;
-	GetDependentSequencesWithRelation(relationId, &seqInfoList, 0);
+	GetDependentSequencesWithRelation(relationId, &seqInfoList, 0, DEPENDENCY_AUTO);

 	List *seqIdList = NIL;
 	SequenceInfo *seqInfo = NULL;
@@ -1586,10 +1586,13 @@ GetAttributeTypeOid(Oid relationId, AttrNumber attnum)
  * For both cases, we use the intermediate AttrDefault object from pg_depend.
  * If attnum is specified, we only return the sequences related to that
  * attribute of the relationId.
+ * See DependencyType for the possible values of depType.
+ * We use DEPENDENCY_INTERNAL for sequences created by identity columns and
+ * DEPENDENCY_AUTO for regular sequences.
  */
 void
 GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
-								  AttrNumber attnum)
+								  AttrNumber attnum, char depType)
 {
 	Assert(*seqInfoList == NIL);

@@ -1626,7 +1629,7 @@ GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
 		if (deprec->classid == AttrDefaultRelationId &&
 			deprec->objsubid == 0 &&
 			deprec->refobjsubid != 0 &&
-			deprec->deptype == DEPENDENCY_AUTO)
+			deprec->deptype == depType)
 		{
 			/*
 			 * We are going to generate corresponding SequenceInfo

@@ -1635,8 +1638,7 @@ GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
 			attrdefResult = lappend_oid(attrdefResult, deprec->objid);
 			attrdefAttnumResult = lappend_int(attrdefAttnumResult, deprec->refobjsubid);
 		}
-		else if ((deprec->deptype == DEPENDENCY_AUTO || deprec->deptype ==
-				  DEPENDENCY_INTERNAL) &&
+		else if (deprec->deptype == depType &&
 				 deprec->refobjsubid != 0 &&
 				 deprec->classid == RelationRelationId &&
 				 get_rel_relkind(deprec->objid) == RELKIND_SEQUENCE)
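A small caller-side sketch of the widened signature, using only names that appear in the hunks above (the surrounding variables are assumed to be in scope):

```c
/* Collect only the sequences that back identity columns of a relation:
 * identity sequences hang off pg_depend with DEPENDENCY_INTERNAL, while
 * plain serial/DEFAULT-nextval sequences use DEPENDENCY_AUTO. */
List *identitySeqInfoList = NIL;
AttrNumber anyAttribute = 0;	/* 0 means "not restricted to one attribute" */
GetDependentSequencesWithRelation(relationId, &identitySeqInfoList,
								  anyAttribute, DEPENDENCY_INTERNAL);
```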
@@ -1883,6 +1885,53 @@ SequenceDependencyCommandList(Oid relationId)
 }


+/*
+ * IdentitySequenceDependencyCommandList generates a command to execute
+ * a UDF (WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES) on workers to modify the
+ * identity columns' min/max values so that they produce unique values on the
+ * workers.
+ */
+List *
+IdentitySequenceDependencyCommandList(Oid targetRelationId)
+{
+	List *commandList = NIL;
+
+	Relation relation = relation_open(targetRelationId, AccessShareLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(relation);
+
+	bool tableHasIdentityColumn = false;
+	for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts;
+		 attributeIndex++)
+	{
+		Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, attributeIndex);
+
+		if (attributeForm->attidentity)
+		{
+			tableHasIdentityColumn = true;
+			break;
+		}
+	}
+
+	relation_close(relation, NoLock);
+
+	if (tableHasIdentityColumn)
+	{
+		StringInfo stringInfo = makeStringInfo();
+		char *tableName = generate_qualified_relation_name(targetRelationId);
+		appendStringInfo(stringInfo,
+						 WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES,
+						 quote_literal_cstr(tableName));
+
+		commandList = lappend(commandList,
+							  makeTableDDLCommandString(stringInfo->data));
+	}
+
+	return commandList;
+}
+
+
 /*
  * CreateSequenceDependencyCommand generates a query string for calling
  * worker_record_sequence_dependency on the worker to recreate a sequence->table
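Assuming WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES expands to a call template for the UDF added later in this diff, the DDL command this function queues up for each identity-bearing table looks roughly like:

```sql
-- The qualified table name below is illustrative.
SELECT pg_catalog.worker_adjust_identity_column_seq_ranges('public.items'::regclass);
```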
@@ -2605,8 +2654,7 @@ CreateShellTableOnWorkers(Oid relationId)
 	List *commandList = list_make1(DISABLE_DDL_PROPAGATION);

 	IncludeSequenceDefaults includeSequenceDefaults = WORKER_NEXTVAL_SEQUENCE_DEFAULTS;
-	IncludeIdentities includeIdentityDefaults =
-		INCLUDE_IDENTITY_AS_SEQUENCE_DEFAULTS;
+	IncludeIdentities includeIdentityDefaults = INCLUDE_IDENTITY;

 	bool creatingShellTableOnRemoteNode = true;
 	List *tableDDLCommands = GetFullTableCreationCommands(relationId,
@@ -985,7 +985,7 @@ AppendShardSizeQuery(StringInfo selectQuery, ShardInterval *shardInterval)

 	appendStringInfo(selectQuery, "SELECT " UINT64_FORMAT " AS shard_id, ", shardId);
 	appendStringInfo(selectQuery, "%s AS shard_name, ", quotedShardName);
-	appendStringInfo(selectQuery, PG_RELATION_SIZE_FUNCTION, quotedShardName);
+	appendStringInfo(selectQuery, PG_TOTAL_RELATION_SIZE_FUNCTION, quotedShardName);
 }
|
||||||
* definition, optional column storage and statistics definitions, and index
|
* definition, optional column storage and statistics definitions, and index
|
||||||
* constraint and trigger definitions.
|
* constraint and trigger definitions.
|
||||||
* When IncludeIdentities is NO_IDENTITY, the function does not include identity column
|
* When IncludeIdentities is NO_IDENTITY, the function does not include identity column
|
||||||
* specifications. When it's INCLUDE_IDENTITY_AS_SEQUENCE_DEFAULTS, the function
|
* specifications. When it's INCLUDE_IDENTITY it creates GENERATED .. AS IDENTIY clauses.
|
||||||
* uses sequences and set them as default values for identity columns by using exactly
|
|
||||||
* the same approach with worker_nextval('sequence') & nextval('sequence') logic
|
|
||||||
* desribed above. When it's INCLUDE_IDENTITY it creates GENERATED .. AS IDENTIY clauses.
|
|
||||||
*/
|
*/
|
||||||
List *
|
List *
|
||||||
GetFullTableCreationCommands(Oid relationId,
|
GetFullTableCreationCommands(Oid relationId,
|
||||||
|
@@ -500,6 +497,15 @@ GetFullTableCreationCommands(Oid relationId,
 		tableDDLEventList = lappend(tableDDLEventList,
 									truncateTriggerCommand);
 	}
+
+	/*
+	 * For identity column sequences, we only need to modify
+	 * their min/max values to produce unique values on the worker nodes.
+	 */
+	List *identitySequenceDependencyCommandList =
+		IdentitySequenceDependencyCommandList(relationId);
+	tableDDLEventList = list_concat(tableDDLEventList,
+									identitySequenceDependencyCommandList);
 }

 	tableDDLEventList = list_concat(tableDDLEventList, postLoadCreationCommandList);
|
||||||
appendStringInfo(&buf,
|
appendStringInfo(&buf,
|
||||||
"SELECT pg_catalog.replicate_reference_tables(%s)",
|
"SELECT pg_catalog.replicate_reference_tables(%s)",
|
||||||
quote_literal_cstr(shardTranferModeLabel));
|
quote_literal_cstr(shardTranferModeLabel));
|
||||||
BackgroundTask *task = ScheduleBackgroundTask(jobId, GetUserId(), buf.data,
|
|
||||||
|
Oid superUserId = CitusExtensionOwner();
|
||||||
|
BackgroundTask *task = ScheduleBackgroundTask(jobId, superUserId, buf.data,
|
||||||
prevJobIdx, prevJobId);
|
prevJobIdx, prevJobId);
|
||||||
prevJobId[prevJobIdx] = task->taskid;
|
prevJobId[prevJobIdx] = task->taskid;
|
||||||
prevJobIdx++;
|
prevJobIdx++;
|
||||||
|
@@ -2034,7 +2036,7 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent,
 	if (updateType == PLACEMENT_UPDATE_MOVE)
 	{
 		appendStringInfo(placementUpdateCommand,
-						 "SELECT citus_move_shard_placement(%ld,%u,%u,%s)",
+						 "SELECT pg_catalog.citus_move_shard_placement(%ld,%u,%u,%s)",
 						 shardId,
 						 sourceNode->nodeId,
 						 targetNode->nodeId,

@@ -2043,7 +2045,7 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent,
 	else if (updateType == PLACEMENT_UPDATE_COPY)
 	{
 		appendStringInfo(placementUpdateCommand,
-						 "SELECT citus_copy_shard_placement(%ld,%u,%u,%s)",
+						 "SELECT pg_catalog.citus_copy_shard_placement(%ld,%u,%u,%s)",
 						 shardId,
 						 sourceNode->nodeId,
 						 targetNode->nodeId,
@@ -1810,7 +1810,7 @@ CreateWorkerForPlacementSet(List *workersForPlacementList)
 	/* we don't have value field as it's a set */
 	info.entrysize = info.keysize;

-	uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+	uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE);

 	HTAB *workerForPlacementSet = hash_create("worker placement set", 32, &info,
 											  hashFlags);
|
||||||
targetNodeId);
|
targetNodeId);
|
||||||
|
|
||||||
StringInfo selectShardQueryForCopy = makeStringInfo();
|
StringInfo selectShardQueryForCopy = makeStringInfo();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Even though we do COPY(SELECT ...) all the columns, we can't just do SELECT * because we need to not COPY generated colums.
|
||||||
|
*/
|
||||||
|
const char *columnList = CopyableColumnNamesFromRelationName(relationSchemaName,
|
||||||
|
relationName);
|
||||||
appendStringInfo(selectShardQueryForCopy,
|
appendStringInfo(selectShardQueryForCopy,
|
||||||
"SELECT * FROM %s;", relationQualifiedName);
|
"SELECT %s FROM %s;", columnList, relationQualifiedName);
|
||||||
|
|
||||||
ParamListInfo params = NULL;
|
ParamListInfo params = NULL;
|
||||||
ExecuteQueryStringIntoDestReceiver(selectShardQueryForCopy->data, params,
|
ExecuteQueryStringIntoDestReceiver(selectShardQueryForCopy->data, params,
|
||||||
|
|
|
@ -73,7 +73,7 @@ static void ShardCopyDestReceiverDestroy(DestReceiver *destReceiver);
|
||||||
static bool CanUseLocalCopy(uint32_t destinationNodeId);
|
static bool CanUseLocalCopy(uint32_t destinationNodeId);
|
||||||
static StringInfo ConstructShardCopyStatement(List *destinationShardFullyQualifiedName,
|
static StringInfo ConstructShardCopyStatement(List *destinationShardFullyQualifiedName,
|
||||||
bool
|
bool
|
||||||
useBinaryFormat);
|
useBinaryFormat, TupleDesc tupleDesc);
|
||||||
static void WriteLocalTuple(TupleTableSlot *slot, ShardCopyDestReceiver *copyDest);
|
static void WriteLocalTuple(TupleTableSlot *slot, ShardCopyDestReceiver *copyDest);
|
||||||
static int ReadFromLocalBufferCallback(void *outBuf, int minRead, int maxRead);
|
static int ReadFromLocalBufferCallback(void *outBuf, int minRead, int maxRead);
|
||||||
static void LocalCopyToShard(ShardCopyDestReceiver *copyDest, CopyOutState
|
static void LocalCopyToShard(ShardCopyDestReceiver *copyDest, CopyOutState
|
||||||
|
@@ -105,7 +105,8 @@ ConnectToRemoteAndStartCopy(ShardCopyDestReceiver *copyDest)

 	StringInfo copyStatement = ConstructShardCopyStatement(
 		copyDest->destinationShardFullyQualifiedName,
-		copyDest->copyOutState->binary);
+		copyDest->copyOutState->binary,
+		copyDest->tupleDescriptor);

 	if (!SendRemoteCommand(copyDest->connection, copyStatement->data))
 	{
@@ -344,21 +345,80 @@ ShardCopyDestReceiverDestroy(DestReceiver *dest)
 }


+/*
+ * CopyableColumnNamesFromTupleDesc creates and returns a comma-separated
+ * string of column names to be used in the COPY and SELECT statements when
+ * copying a table. Those statements must filter out the GENERATED columns,
+ * since COPY fails to handle them; while iterating over the attributes of
+ * the table we also need to skip dropped columns.
+ */
+const char *
+CopyableColumnNamesFromTupleDesc(TupleDesc tupDesc)
+{
+	StringInfo columnList = makeStringInfo();
+	bool firstInList = true;
+
+	for (int i = 0; i < tupDesc->natts; i++)
+	{
+		Form_pg_attribute att = TupleDescAttr(tupDesc, i);
+		if (att->attgenerated || att->attisdropped)
+		{
+			continue;
+		}
+		if (!firstInList)
+		{
+			appendStringInfo(columnList, ",");
+		}
+
+		firstInList = false;
+
+		appendStringInfo(columnList, "%s", quote_identifier(NameStr(att->attname)));
+	}
+
+	return columnList->data;
+}
+
+
+/*
+ * CopyableColumnNamesFromRelationName is a wrapper for
+ * CopyableColumnNamesFromTupleDesc.
+ */
+const char *
+CopyableColumnNamesFromRelationName(const char *schemaName, const char *relationName)
+{
+	Oid namespaceOid = get_namespace_oid(schemaName, true);
+
+	Oid relationId = get_relname_relid(relationName, namespaceOid);
+
+	Relation relation = relation_open(relationId, AccessShareLock);
+
+	TupleDesc tupleDesc = RelationGetDescr(relation);
+
+	const char *columnList = CopyableColumnNamesFromTupleDesc(tupleDesc);
+
+	relation_close(relation, NoLock);
+
+	return columnList;
+}
+
+
 /*
  * ConstructShardCopyStatement constructs the text of a COPY statement
  * for copying into a result table
  */
 static StringInfo
 ConstructShardCopyStatement(List *destinationShardFullyQualifiedName, bool
-							useBinaryFormat)
+							useBinaryFormat,
+							TupleDesc tupleDesc)
 {
 	char *destinationShardSchemaName = linitial(destinationShardFullyQualifiedName);
 	char *destinationShardRelationName = lsecond(destinationShardFullyQualifiedName);

 	StringInfo command = makeStringInfo();
-	appendStringInfo(command, "COPY %s.%s FROM STDIN",
-					 quote_identifier(destinationShardSchemaName), quote_identifier(
-						 destinationShardRelationName));
+
+	const char *columnList = CopyableColumnNamesFromTupleDesc(tupleDesc);
+
+	appendStringInfo(command, "COPY %s.%s (%s) FROM STDIN",
+					 quote_identifier(destinationShardSchemaName), quote_identifier(
+						 destinationShardRelationName), columnList);

 	if (useBinaryFormat)
 	{
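A hedged SQL sketch of the failure mode these column lists avoid; the table definition is made up:

```sql
CREATE TABLE events (
    id bigint,
    payload text,
    payload_length int GENERATED ALWAYS AS (length(payload)) STORED
);

-- Copying all columns fails: generated columns cannot be COPY targets.
COPY events FROM STDIN;
-- ERROR:  column "payload_length" is a generated column

-- Listing only the copyable columns, as the new helpers do, works:
COPY events (id, payload) FROM STDIN;
```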
@@ -110,8 +110,13 @@ worker_split_copy(PG_FUNCTION_ARGS)
 										   splitCopyInfoList))));

 	StringInfo selectShardQueryForCopy = makeStringInfo();
+	const char *columnList = CopyableColumnNamesFromRelationName(
+		sourceShardToCopySchemaName,
+		sourceShardToCopyName);
+
 	appendStringInfo(selectShardQueryForCopy,
-					 "SELECT * FROM %s;", sourceShardToCopyQualifiedName);
+					 "SELECT %s FROM %s;", columnList,
+					 sourceShardToCopyQualifiedName);

 	ParamListInfo params = NULL;
 	ExecuteQueryStringIntoDestReceiver(selectShardQueryForCopy->data, params,
@@ -0,0 +1,5 @@
+-- citus--11.2-1--11.2-2
+-- Since we backported the UDF below from version 11.3,
+-- the version portion of this file does not match with
+-- the version of the included file.
+#include "udfs/worker_adjust_identity_column_seq_ranges/11.3-1.sql"

@@ -0,0 +1,2 @@
+-- citus--11.2-2--11.2-1
+DROP FUNCTION IF EXISTS pg_catalog.worker_adjust_identity_column_seq_ranges(regclass);

src/backend/distributed/sql/udfs/worker_adjust_identity_column_seq_ranges/11.3-1.sql (generated, new file, 7 lines)
@@ -0,0 +1,7 @@
+CREATE OR REPLACE FUNCTION pg_catalog.worker_adjust_identity_column_seq_ranges(regclass)
+    RETURNS VOID
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$worker_adjust_identity_column_seq_ranges$$;
+COMMENT ON FUNCTION pg_catalog.worker_adjust_identity_column_seq_ranges(regclass)
+    IS 'modify identity column seq ranges to produce globally unique values';
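A hedged sketch of how these migration scripts come into play during an extension upgrade (the version strings are the ones in the files above):

```sql
-- Applies citus--11.2-1--11.2-2.sql, which pulls in the backported UDF;
-- the matching downgrade script drops the function again.
ALTER EXTENSION citus UPDATE TO '11.2-2';
```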
@@ -1,70 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * pg_send_cancellation.c
- *
- * This file contains functions to test setting pg_send_cancellation.
- *
- * Copyright (c) Citus Data, Inc.
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-#include "miscadmin.h"
-#include "fmgr.h"
-#include "port.h"
-
-#include "postmaster/postmaster.h"
-
-
-#define PG_SEND_CANCELLATION_VERSION \
-	"pg_send_cancellation (PostgreSQL) " PG_VERSION "\n"
-
-
-/* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(get_cancellation_key);
-PG_FUNCTION_INFO_V1(run_pg_send_cancellation);
-
-
-/*
- * get_cancellation_key returns the cancellation key of the current process
- * as an integer.
- */
-Datum
-get_cancellation_key(PG_FUNCTION_ARGS)
-{
-	PG_RETURN_INT32(MyCancelKey);
-}
-
-
-/*
- * run_pg_send_cancellation runs the pg_send_cancellation program with
- * the specified arguments
- */
-Datum
-run_pg_send_cancellation(PG_FUNCTION_ARGS)
-{
-	int pid = PG_GETARG_INT32(0);
-	int cancelKey = PG_GETARG_INT32(1);
-
-	char sendCancellationPath[MAXPGPATH];
-	char command[1024];
-
-	/* Locate executable backend before we change working directory */
-	if (find_other_exec(my_exec_path, "pg_send_cancellation",
-						PG_SEND_CANCELLATION_VERSION,
-						sendCancellationPath) < 0)
-	{
-		ereport(ERROR, (errmsg("could not locate pg_send_cancellation")));
-	}
-
-	pg_snprintf(command, sizeof(command), "%s %d %d %s %d",
-				sendCancellationPath, pid, cancelKey, "localhost", PostPortNumber);
-
-	if (system(command) != 0)
-	{
-		ereport(ERROR, (errmsg("failed to run command: %s", command)));
-	}
-
-	PG_RETURN_VOID();
-}
@@ -462,21 +462,25 @@ HasDropCommandViolatesOwnership(Node *node)
 static bool
 AnyObjectViolatesOwnership(DropStmt *dropStmt)
 {
+	bool hasOwnershipViolation = false;
 	volatile ObjectAddress objectAddress = { 0 };
 	Relation relation = NULL;
-	bool objectViolatesOwnership = false;
 	ObjectType objectType = dropStmt->removeType;
 	bool missingOk = dropStmt->missing_ok;

-	Node *object = NULL;
-	foreach_ptr(object, dropStmt->objects)
+	MemoryContext savedContext = CurrentMemoryContext;
+	ResourceOwner savedOwner = CurrentResourceOwner;
+	BeginInternalSubTransaction(NULL);
+	MemoryContextSwitchTo(savedContext);
+
+	PG_TRY();
 	{
-		PG_TRY();
+		Node *object = NULL;
+		foreach_ptr(object, dropStmt->objects)
 		{
 			objectAddress = get_object_address(objectType, object,
 											   &relation, AccessShareLock, missingOk);

 			if (OidIsValid(objectAddress.objectId))
 			{
 				/*

@@ -487,29 +491,39 @@ AnyObjectViolatesOwnership(DropStmt *dropStmt)
 											objectAddress,
 											object, relation);
 			}
-		}
-		PG_CATCH();
-		{
-			if (OidIsValid(objectAddress.objectId))
+			if (relation != NULL)
 			{
-				/* ownership violation */
-				objectViolatesOwnership = true;
+				relation_close(relation, NoLock);
+				relation = NULL;
 			}
 		}
-		PG_END_TRY();
+
+		ReleaseCurrentSubTransaction();
+		MemoryContextSwitchTo(savedContext);
+		CurrentResourceOwner = savedOwner;
+	}
+	PG_CATCH();
+	{
+		MemoryContextSwitchTo(savedContext);
+		ErrorData *edata = CopyErrorData();
+		FlushErrorState();
+
+		hasOwnershipViolation = true;
 		if (relation != NULL)
 		{
-			relation_close(relation, AccessShareLock);
+			relation_close(relation, NoLock);
 			relation = NULL;
 		}
+		RollbackAndReleaseCurrentSubTransaction();
+		MemoryContextSwitchTo(savedContext);
+		CurrentResourceOwner = savedOwner;

-		/* we found ownership violation, so can return here */
-		if (objectViolatesOwnership)
-		{
-			return true;
-		}
+		/* Rethrow error with LOG_SERVER_ONLY to prevent log to be sent to client */
+		edata->elevel = LOG_SERVER_ONLY;
+		ThrowErrorData(edata);
 	}
+	PG_END_TRY();

-	return false;
+	return hasOwnershipViolation;
 }
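Distilled, this hunk adopts the standard "trial subtransaction" idiom: run the risky work inside an internal subtransaction so a thrown error can be rolled back cleanly instead of aborting the surrounding transaction, then rethrow it quietly. A minimal generic sketch under the usual backend APIs (do_risky_work is a hypothetical placeholder, not Citus code):

```c
#include "postgres.h"
#include "access/xact.h"
#include "utils/memutils.h"
#include "utils/resowner.h"

static void
run_in_subtransaction(void (*do_risky_work) (void))
{
	MemoryContext savedContext = CurrentMemoryContext;
	ResourceOwner savedOwner = CurrentResourceOwner;

	BeginInternalSubTransaction(NULL);
	MemoryContextSwitchTo(savedContext);

	PG_TRY();
	{
		do_risky_work();

		ReleaseCurrentSubTransaction();     /* success: keep its effects */
		MemoryContextSwitchTo(savedContext);
		CurrentResourceOwner = savedOwner;
	}
	PG_CATCH();
	{
		MemoryContextSwitchTo(savedContext);
		ErrorData *edata = CopyErrorData(); /* copy before FlushErrorState() */
		FlushErrorState();

		RollbackAndReleaseCurrentSubTransaction();
		MemoryContextSwitchTo(savedContext);
		CurrentResourceOwner = savedOwner;

		edata->elevel = LOG_SERVER_ONLY;    /* keep the rethrow out of the client's face */
		ThrowErrorData(edata);
	}
	PG_END_TRY();
}
```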
@@ -141,7 +141,17 @@ SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSch
 	fauxFunction->funcexpr = (Node *) fauxFuncExpr;

 	/* set the column count to pass ruleutils checks, not used elsewhere */
-	fauxFunction->funccolcount = list_length(rte->eref->colnames);
+	if (rte->relid != 0)
+	{
+		Relation rel = RelationIdGetRelation(rte->relid);
+		fauxFunction->funccolcount = RelationGetNumberOfAttributes(rel);
+		RelationClose(rel);
+	}
+	else
+	{
+		fauxFunction->funccolcount = list_length(rte->eref->colnames);
+	}

 	fauxFunction->funccolnames = funcColumnNames;
 	fauxFunction->funccoltypes = funcColumnTypes;
 	fauxFunction->funccoltypmods = funcColumnTypeMods;
@@ -170,12 +170,11 @@ BreakColocation(Oid sourceRelationId)
 	 */
 	Relation pgDistColocation = table_open(DistColocationRelationId(), ExclusiveLock);

-	uint32 newColocationId = GetNextColocationId();
-	bool localOnly = false;
-	UpdateRelationColocationGroup(sourceRelationId, newColocationId, localOnly);
+	uint32 oldColocationId = TableColocationId(sourceRelationId);
+	CreateColocationGroupForRelation(sourceRelationId);

-	/* if there is not any remaining table in the colocation group, delete it */
-	DeleteColocationGroupIfNoTablesBelong(sourceRelationId);
+	/* if there is not any remaining table in the old colocation group, delete it */
+	DeleteColocationGroupIfNoTablesBelong(oldColocationId);

 	table_close(pgDistColocation, NoLock);
 }
@@ -28,6 +28,7 @@
 #include "distributed/version_compat.h"
 #include "nodes/pg_list.h"
 #include "storage/lockdefs.h"
+#include "utils/catcache.h"
 #include "utils/fmgroids.h"
 #include "utils/hsearch.h"
 #include "common/hashfn.h"

@@ -96,6 +97,8 @@ static List * GetConnectedListHelper(ForeignConstraintRelationshipNode *node,
 									 bool isReferencing);
 static List * GetForeignConstraintRelationshipHelper(Oid relationId, bool isReferencing);

+MemoryContext ForeignConstraintRelationshipMemoryContext = NULL;
+

 /*
  * GetForeignKeyConnectedRelationIdList returns a list of relation id's for
@@ -321,17 +324,36 @@ CreateForeignConstraintRelationshipGraph()
 		return;
 	}

-	ClearForeignConstraintRelationshipGraphContext();
+	/*
+	 * Lazily create our memory context once, and reset it on every reuse.
+	 * Since we have cleared and invalidated fConstraintRelationshipGraph right
+	 * before, we can simply reset the context if it already exists.
+	 */
+	if (ForeignConstraintRelationshipMemoryContext == NULL)
+	{
+		/* make sure we've initialized CacheMemoryContext */
+		if (CacheMemoryContext == NULL)
+		{
+			CreateCacheMemoryContext();
+		}

-	MemoryContext fConstraintRelationshipMemoryContext = AllocSetContextCreateInternal(
-		CacheMemoryContext,
-		"Forign Constraint Relationship Graph Context",
-		ALLOCSET_DEFAULT_MINSIZE,
-		ALLOCSET_DEFAULT_INITSIZE,
-		ALLOCSET_DEFAULT_MAXSIZE);
+		ForeignConstraintRelationshipMemoryContext = AllocSetContextCreate(
+			CacheMemoryContext,
+			"Foreign Constraint Relationship Graph Context",
+			ALLOCSET_DEFAULT_MINSIZE,
+			ALLOCSET_DEFAULT_INITSIZE,
+			ALLOCSET_DEFAULT_MAXSIZE);
+	}
+	else
+	{
+		fConstraintRelationshipGraph = NULL;
+		MemoryContextReset(ForeignConstraintRelationshipMemoryContext);
+	}
+
+	Assert(fConstraintRelationshipGraph == NULL);

 	MemoryContext oldContext = MemoryContextSwitchTo(
-		fConstraintRelationshipMemoryContext);
+		ForeignConstraintRelationshipMemoryContext);

 	fConstraintRelationshipGraph = (ForeignConstraintRelationshipGraph *) palloc(
 		sizeof(ForeignConstraintRelationshipGraph));
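The general shape of this change, as a minimal standalone sketch (standard memory-context APIs; the static pointer and function names are illustrative):

```c
#include "postgres.h"
#include "utils/memutils.h"

static MemoryContext GraphContext = NULL;

static void
ResetOrCreateGraphContext(void)
{
	if (GraphContext == NULL)
	{
		/* created once, parented to a long-lived context */
		GraphContext = AllocSetContextCreate(CacheMemoryContext,
											 "Graph Context",
											 ALLOCSET_DEFAULT_SIZES);
	}
	else
	{
		/* frees every palloc'd chunk in one cheap operation */
		MemoryContextReset(GraphContext);
	}
}
```

Compared with destroying and recreating the context on every rebuild (the hash_destroy-based approach removed in the next hunk), resetting reuses the allocator's blocks and avoids repeatedly leaking and recreating the context itself.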
@@ -631,22 +653,3 @@ CreateOrFindNode(HTAB *adjacencyLists, Oid relid)

 	return node;
 }
-
-
-/*
- * ClearForeignConstraintRelationshipGraphContext clear all the allocated memory obtained
- * for foreign constraint relationship graph. Since all the variables of relationship
- * graph was obtained within the same context, destroying hash map is enough as
- * it deletes the context.
- */
-void
-ClearForeignConstraintRelationshipGraphContext()
-{
-	if (fConstraintRelationshipGraph == NULL)
-	{
-		return;
-	}
-
-	hash_destroy(fConstraintRelationshipGraph->nodeMap);
-	fConstraintRelationshipGraph = NULL;
-}
@@ -420,7 +420,7 @@ CopyShardPlacementToWorkerNodeQuery(ShardPlacement *sourceShardPlacement,
 					  "auto";

 	appendStringInfo(queryString,
-					 "SELECT citus_copy_shard_placement("
+					 "SELECT pg_catalog.citus_copy_shard_placement("
 					 UINT64_FORMAT ", %d, %d, "
 					 "transfer_mode := %s)",
 					 sourceShardPlacement->shardId,
|
||||||
PG_FUNCTION_INFO_V1(worker_apply_shard_ddl_command);
|
PG_FUNCTION_INFO_V1(worker_apply_shard_ddl_command);
|
||||||
PG_FUNCTION_INFO_V1(worker_apply_inter_shard_ddl_command);
|
PG_FUNCTION_INFO_V1(worker_apply_inter_shard_ddl_command);
|
||||||
PG_FUNCTION_INFO_V1(worker_apply_sequence_command);
|
PG_FUNCTION_INFO_V1(worker_apply_sequence_command);
|
||||||
|
PG_FUNCTION_INFO_V1(worker_adjust_identity_column_seq_ranges);
|
||||||
PG_FUNCTION_INFO_V1(worker_append_table_to_shard);
|
PG_FUNCTION_INFO_V1(worker_append_table_to_shard);
|
||||||
PG_FUNCTION_INFO_V1(worker_nextval);
|
PG_FUNCTION_INFO_V1(worker_nextval);
|
||||||
|
|
||||||
|
@@ -133,6 +134,60 @@ worker_apply_inter_shard_ddl_command(PG_FUNCTION_ARGS)
 }


+/*
+ * worker_adjust_identity_column_seq_ranges takes a table oid and runs an
+ * ALTER SEQUENCE statement for each identity column, adjusting the minvalue
+ * and maxvalue of the sequence owned by that column so that the sequence
+ * produces globally unique values.
+ *
+ * We use the table oid instead of a sequence name to avoid any potential
+ * conflicts between sequences of different tables. This way, we can safely
+ * iterate through the identity columns of a specific table without any
+ * issues. While this may introduce a small amount of business logic to the
+ * workers, it's a much safer approach overall.
+ */
+Datum
+worker_adjust_identity_column_seq_ranges(PG_FUNCTION_ARGS)
+{
+	CheckCitusVersion(ERROR);
+
+	Oid tableRelationId = PG_GETARG_OID(0);
+
+	EnsureTableOwner(tableRelationId);
+
+	Relation tableRelation = relation_open(tableRelationId, AccessShareLock);
+	TupleDesc tableTupleDesc = RelationGetDescr(tableRelation);
+
+	bool missingSequenceOk = false;
+
+	for (int attributeIndex = 0; attributeIndex < tableTupleDesc->natts;
+		 attributeIndex++)
+	{
+		Form_pg_attribute attributeForm = TupleDescAttr(tableTupleDesc,
+														attributeIndex);
+
+		/* skip dropped columns */
+		if (attributeForm->attisdropped)
+		{
+			continue;
+		}
+
+		if (attributeForm->attidentity)
+		{
+			Oid sequenceOid = getIdentitySequence(tableRelationId,
+												  attributeForm->attnum,
+												  missingSequenceOk);
+
+			Oid sequenceSchemaOid = get_rel_namespace(sequenceOid);
+			char *sequenceSchemaName = get_namespace_name(sequenceSchemaOid);
+			char *sequenceName = get_rel_name(sequenceOid);
+			Oid sequenceTypeId = pg_get_sequencedef(sequenceOid)->seqtypid;
+
+			AlterSequenceMinMax(sequenceOid, sequenceSchemaName, sequenceName,
+								sequenceTypeId);
+		}
+	}
+
+	relation_close(tableRelation, NoLock);
+
+	PG_RETURN_VOID();
+}
+
+
 /*
  * worker_apply_sequence_command takes a CREATE SEQUENCE command string, runs the
  * CREATE SEQUENCE command then creates and runs an ALTER SEQUENCE statement
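On a worker, the net effect is one ALTER SEQUENCE per identity column, carving out a node-local slice of the bigint range. A hedged sketch of the kind of statement AlterSequenceMinMax() ends up issuing; the sequence name and range boundaries below are made up:

```sql
-- Each worker draws identity values from a disjoint slice, so values stay
-- globally unique across the cluster:
ALTER SEQUENCE public.items_id_seq
    MINVALUE 281474976710657 MAXVALUE 562949953421313
    RESTART WITH 281474976710657;
```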
@@ -1 +0,0 @@
-pg_send_cancellation
@@ -1,24 +0,0 @@
-citus_top_builddir = ../../..
-
-PROGRAM = pg_send_cancellation
-PGFILEDESC = "pg_send_cancellation sends a custom cancellation message"
-OBJS = $(citus_abs_srcdir)/src/bin/pg_send_cancellation/pg_send_cancellation.o
-PG_CPPFLAGS = -I$(libpq_srcdir)
-PG_LIBS_INTERNAL = $(libpq_pgport)
-PG_LDFLAGS += $(LDFLAGS)
-
-include $(citus_top_builddir)/Makefile.global
-
-# We reuse all the Citus flags (incl. security flags), but we are building a program not a shared library
-# We sometimes build Citus with a newer version of gcc than Postgres was built
-# with and this breaks LTO (link-time optimization). Even if disabling it can
-# have some perf impact this is ok because pg_send_cancellation is only used
-# for tests anyway.
-override CFLAGS := $(filter-out -shared, $(CFLAGS)) -fno-lto
-
-# Filter out unneeded dependencies
-override LIBS := $(filter-out -lz -lreadline -ledit -ltermcap -lncurses -lcurses -lpam, $(LIBS))
-
-clean: clean-pg_send_cancellation
-clean-pg_send_cancellation:
-	rm -f $(PROGRAM) $(OBJS)
@@ -1,47 +0,0 @@
-# pg_send_cancellation
-
-pg_send_cancellation is a program for manually sending a cancellation
-to a Postgres endpoint. It is effectively a command-line version of
-PQcancel in libpq, but it can use any PID or cancellation key.
-
-We use pg_send_cancellation primarily to propagate cancellations between pgbouncers
-behind a load balancer. Since the cancellation protocol involves
-opening a new connection, the new connection may go to a different
-node that does not recognize the cancellation key. To handle that
-scenario, we modified pgbouncer to pass unrecognized cancellation
-keys to a shell command.
-
-Users can configure the cancellation_command, which will be run with:
-```
-<cancellation_command> <client ip> <client port> <pid> <cancel key>
-```
-
-Note that pgbouncer does not use actual PIDs. Instead, it generates the PID and
-cancellation key together as a random 8-byte number. This makes the chance of
-collisions exceedingly small.
-
-By providing pg_send_cancellation as part of Citus, we can use a shell script
-that pgbouncer invokes to propagate the cancellation to all *other* worker
-nodes in the same cluster, for example:
-
-```bash
-#!/bin/sh
-remote_ip=$1
-remote_port=$2
-pid=$3
-cancel_key=$4
-
-postgres_path=/usr/pgsql-14/bin
-pgbouncer_port=6432
-
-nodes_query="select nodename from pg_dist_node where groupid > 0 and groupid not in (select groupid from pg_dist_local_group) and nodecluster = current_setting('citus.cluster_name')"
-
-# Get hostnames of other worker nodes in the cluster, and send cancellation to their pgbouncers
-$postgres_path/psql -c "$nodes_query" -tAX | xargs -n 1 sh -c "$postgres_path/pg_send_cancellation $pid $cancel_key \$0 $pgbouncer_port"
-```
-
-One thing we need to be careful about is that the cancellations do not get forwarded
-back-and-forth. This is handled in pgbouncer by setting the last bit of all generated
-cancellation keys (sent to clients) to 1, and setting the last bit of all forwarded keys to 0.
-That way, when a pgbouncer receives a cancellation key with the last bit set to 0,
-it knows it is from another pgbouncer and should not forward further, and should set
-the last bit to 1 when comparing to stored cancellation keys.
-
-Another thing we need to be careful about is that the integers should be encoded
-as big endian on the wire.
@ -1,261 +0,0 @@
|
||||||
/*
|
|
||||||
* pg_send_cancellation is a program for manually sending a cancellation
|
|
||||||
* to a Postgres endpoint. It is effectively a command-line version of
|
|
||||||
* PQcancel in libpq, but it can use any PID or cancellation key.
|
|
||||||
*
|
|
||||||
* Portions Copyright (c) Citus Data, Inc.
|
|
||||||
*
|
|
||||||
* For the internal_cancel function:
|
|
||||||
*
|
|
||||||
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation for any purpose, without fee, and without a written agreement
 * is hereby granted, provided that the above copyright notice and this
 * paragraph and the following two paragraphs appear in all copies.
 *
 * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
 * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
 * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 */

#include "postgres_fe.h"

#include <sys/stat.h>
#include <fcntl.h>
#include <ctype.h>
#include <time.h>
#include <unistd.h>

#include "common/ip.h"
#include "common/link-canary.h"
#include "common/scram-common.h"
#include "common/string.h"
#include "libpq-fe.h"
#include "libpq-int.h"
#include "mb/pg_wchar.h"
#include "port/pg_bswap.h"


#define ERROR_BUFFER_SIZE 256


static int internal_cancel(SockAddr *raddr, int be_pid, int be_key,
                           char *errbuf, int errbufsize);


/*
 * main entry point into the pg_send_cancellation program.
 */
int
main(int argc, char *argv[])
{
    if (argc == 2 && strcmp(argv[1], "-V") == 0)
    {
        pg_fprintf(stdout, "pg_send_cancellation (PostgreSQL) " PG_VERSION "\n");
        return 0;
    }

    if (argc < 4 || argc > 5)
    {
        char *program = argv[0];
        pg_fprintf(stderr, "%s requires 4 arguments\n\n", program);
        pg_fprintf(stderr, "Usage: %s <pid> <cancel key> <hostname> [port]\n", program);
        return 1;
    }

    char *pidString = argv[1];
    char *cancelKeyString = argv[2];
    char *host = argv[3];
    char *portString = "5432";

    if (argc >= 5)
    {
        portString = argv[4];
    }

    /* parse the PID and cancellation key */
    int pid = strtol(pidString, NULL, 10);
    int cancelAuthCode = strtol(cancelKeyString, NULL, 10);

    char errorBuffer[ERROR_BUFFER_SIZE] = { 0 };

    struct addrinfo *ipAddressList;
    struct addrinfo hint;
    int ipAddressListFamily = AF_UNSPEC;
    SockAddr socketAddress;

    memset(&hint, 0, sizeof(hint));
    hint.ai_socktype = SOCK_STREAM;
    hint.ai_family = ipAddressListFamily;

    /* resolve the hostname to an IP */
    int ret = pg_getaddrinfo_all(host, portString, &hint, &ipAddressList);
    if (ret || !ipAddressList)
    {
        pg_fprintf(stderr, "could not translate host name \"%s\" to address: %s\n",
                   host, gai_strerror(ret));
        return 1;
    }

    if (ipAddressList->ai_addrlen > sizeof(socketAddress.addr))
    {
        pg_fprintf(stderr, "invalid address length");
        return 1;
    }

    /*
     * Explanation of IGNORE-BANNED:
     * This is a common pattern when using getaddrinfo. The system guarantees
     * that ai_addrlen < sizeof(socketAddress.addr); out of an abundance of
     * caution, we also check it above.
     */
    memcpy(&socketAddress.addr, ipAddressList->ai_addr, ipAddressList->ai_addrlen); /* IGNORE-BANNED */
    socketAddress.salen = ipAddressList->ai_addrlen;

    /* send the cancellation */
    bool cancelSucceeded = internal_cancel(&socketAddress, pid, cancelAuthCode,
                                           errorBuffer, sizeof(errorBuffer));
    if (!cancelSucceeded)
    {
        pg_fprintf(stderr, "sending cancellation to %s:%s failed: %s",
                   host, portString, errorBuffer);
        return 1;
    }

    pg_freeaddrinfo_all(ipAddressListFamily, ipAddressList);

    return 0;
}


/* *INDENT-OFF* */

/*
 * internal_cancel is copied from fe-connect.c
 *
 * The return value is true if the cancel request was successfully
 * dispatched, false if not (in which case an error message is available).
 * Note: successful dispatch is no guarantee that there will be any effect at
 * the backend. The application must read the operation result as usual.
 *
 * CAUTION: we want this routine to be safely callable from a signal handler
 * (for example, an application might want to call it in a SIGINT handler).
 * This means we cannot use any C library routine that might be non-reentrant.
 * malloc/free are often non-reentrant, and anything that might call them is
 * just as dangerous. We avoid sprintf here for that reason. Building up
 * error messages with strcpy/strcat is tedious but should be quite safe.
 * We also save/restore errno in case the signal handler support doesn't.
 *
 * internal_cancel() is an internal helper function to make code-sharing
 * between the two versions of the cancel function possible.
 */
static int
internal_cancel(SockAddr *raddr, int be_pid, int be_key,
                char *errbuf, int errbufsize)
{
    int save_errno = SOCK_ERRNO;
    pgsocket tmpsock = PGINVALID_SOCKET;
    char sebuf[PG_STRERROR_R_BUFLEN];
    int maxlen;
    struct
    {
        uint32 packetlen;
        CancelRequestPacket cp;
    } crp;

    /*
     * We need to open a temporary connection to the postmaster. Do this with
     * only kernel calls.
     */
    if ((tmpsock = socket(raddr->addr.ss_family, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
    {
        strlcpy(errbuf, "PQcancel() -- socket() failed: ", errbufsize);
        goto cancel_errReturn;
    }
retry3:
    if (connect(tmpsock, (struct sockaddr *) &raddr->addr, raddr->salen) < 0)
    {
        if (SOCK_ERRNO == EINTR)
            /* Interrupted system call - we'll just try again */
            goto retry3;
        strlcpy(errbuf, "PQcancel() -- connect() failed: ", errbufsize);
        goto cancel_errReturn;
    }

    /*
     * We needn't set nonblocking I/O or NODELAY options here.
     */

    /* Create and send the cancel request packet. */

    crp.packetlen = pg_hton32((uint32) sizeof(crp));
    crp.cp.cancelRequestCode = (MsgType) pg_hton32(CANCEL_REQUEST_CODE);
    crp.cp.backendPID = pg_hton32(be_pid);
    crp.cp.cancelAuthCode = pg_hton32(be_key);

retry4:
    if (send(tmpsock, (char *) &crp, sizeof(crp), 0) != (int) sizeof(crp))
    {
        if (SOCK_ERRNO == EINTR)
            /* Interrupted system call - we'll just try again */
            goto retry4;
        strlcpy(errbuf, "PQcancel() -- send() failed: ", errbufsize);
        goto cancel_errReturn;
    }

    /*
     * Wait for the postmaster to close the connection, which indicates that
     * it's processed the request. Without this delay, we might issue another
     * command only to find that our cancel zaps that command instead of the
     * one we thought we were canceling. Note we don't actually expect this
     * read to obtain any data, we are just waiting for EOF to be signaled.
     */
retry5:
    if (recv(tmpsock, (char *) &crp, 1, 0) < 0)
    {
        if (SOCK_ERRNO == EINTR)
            /* Interrupted system call - we'll just try again */
            goto retry5;
        /* we ignore other error conditions */
    }

    /* All done */
    closesocket(tmpsock);
    SOCK_ERRNO_SET(save_errno);
    return true;

cancel_errReturn:

    /*
     * Make sure we don't overflow the error buffer. Leave space for the \n at
     * the end, and for the terminating zero.
     */
    maxlen = errbufsize - strlen(errbuf) - 2;
    if (maxlen >= 0)
    {
        /*
         * Explanation of IGNORE-BANNED:
         * This is well-tested libpq code that we would like to preserve in its
         * original form. The appropriate length calculation is done above.
         */
        strncat(errbuf, SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)), /* IGNORE-BANNED */
                maxlen);
        strcat(errbuf, "\n"); /* IGNORE-BANNED */
    }
    if (tmpsock != PGINVALID_SOCKET)
        closesocket(tmpsock);
    SOCK_ERRNO_SET(save_errno);
    return false;
}

/* *INDENT-ON* */
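For reference, the tool above hand-rolls the same CancelRequest handshake that libpq exposes in-process through PQgetCancel()/PQcancel(); pg_send_cancellation existed so the handshake could be performed from another process, given only the backend PID, cancel key, host, and port. A minimal libpq sketch of the in-process variant (the connection string is an illustrative assumption, not part of the file above):

#include <stdio.h>
#include "libpq-fe.h"

int
main(void)
{
    /* illustrative connection string; adjust for your environment */
    PGconn *conn = PQconnectdb("host=localhost port=5432 dbname=postgres");
    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        return 1;
    }

    /* PQgetCancel snapshots the backend PID and cancel key from the connection */
    PGcancel *cancel = PQgetCancel(conn);
    if (cancel != NULL)
    {
        char errbuf[256];

        /* PQcancel opens a fresh socket and sends the CancelRequest packet,
         * much like internal_cancel() above */
        if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
            fprintf(stderr, "cancel failed: %s\n", errbuf);

        PQfreeCancel(cancel);
    }

    PQfinish(conn);
    return 0;
}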
@@ -566,6 +566,9 @@ extern bool ConstrTypeCitusCanDefaultName(ConstrType constrType);
 extern char * GetAlterColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId,
                                                   char *colname, bool missingTableOk);
 
+extern void ErrorIfTableHasUnsupportedIdentityColumn(Oid relationId);
+extern void ErrorIfTableHasIdentityColumn(Oid relationId);
+
 /* text_search.c - forward declarations */
 extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address);
 extern List * GetCreateTextSearchDictionaryStatements(const ObjectAddress *address);
@@ -124,8 +124,7 @@ typedef enum IncludeSequenceDefaults
 typedef enum IncludeIdentities
 {
     NO_IDENTITY = 0, /* don't include identities */
-    INCLUDE_IDENTITY_AS_SEQUENCE_DEFAULTS = 1, /* include identities as sequences */
-    INCLUDE_IDENTITY = 2 /* include identities as-is */
+    INCLUDE_IDENTITY = 1 /* include identities as-is */
 } IncludeIdentities;
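After this change a caller only decides between skipping identity clauses and emitting them verbatim; the "as sequence defaults" middle ground is gone. A hypothetical caller sketch (the helper name and the emitted clause are illustrative assumptions, not code from this diff):

static void
AppendIdentityClause(StringInfo buf, IncludeIdentities includeIdentities)
{
    switch (includeIdentities)
    {
        case NO_IDENTITY:
        {
            /* leave the column definition without an identity clause */
            break;
        }

        case INCLUDE_IDENTITY:
        {
            /* emit the identity clause as defined on the source table;
             * hard-coded here purely for illustration */
            appendStringInfoString(buf, " GENERATED BY DEFAULT AS IDENTITY");
            break;
        }
    }
}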
@@ -20,7 +20,6 @@ extern bool ShouldUndistributeCitusLocalTable(Oid relationId);
 extern List * ReferencedRelationIdList(Oid relationId);
 extern List * ReferencingRelationIdList(Oid relationId);
 extern void SetForeignConstraintRelationshipGraphInvalid(void);
-extern void ClearForeignConstraintRelationshipGraphContext(void);
 extern bool OidVisited(HTAB *oidVisitedMap, Oid oid);
 extern void VisitOid(HTAB *oidVisitedMap, Oid oid);
@@ -101,11 +101,12 @@ extern void SyncNodeMetadataToNodesMain(Datum main_arg);
 extern void SignalMetadataSyncDaemon(Oid database, int sig);
 extern bool ShouldInitiateMetadataSync(bool *lockFailure);
 extern List * SequenceDependencyCommandList(Oid relationId);
+extern List * IdentitySequenceDependencyCommandList(Oid targetRelationId);
 extern List * DDLCommandsForSequence(Oid sequenceOid, char *ownerName);
 extern List * GetSequencesFromAttrDef(Oid attrdefOid);
 extern void GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
-                                              AttrNumber attnum);
+                                              AttrNumber attnum, char depType);
 extern List * GetDependentFunctionsWithRelation(Oid relationId);
 extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum);
 extern void SetLocalEnableMetadataSync(bool state);
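The new depType parameter plausibly maps to pg_depend's deptype column, where DEPENDENCY_AUTO ('a') covers sequences owned through serial/DEFAULT nextval() and DEPENDENCY_INTERNAL ('i') covers sequences backing identity columns. A hypothetical pair of call sites under that assumption (helper name invented for illustration):

static void
CollectSequenceInfo(Oid relationId, AttrNumber attnum)
{
    List *autoSeqInfoList = NIL;
    List *identitySeqInfoList = NIL;

    /* sequences attached via serial / DEFAULT nextval() */
    GetDependentSequencesWithRelation(relationId, &autoSeqInfoList, attnum,
                                      DEPENDENCY_AUTO);

    /* sequences created internally for identity columns */
    GetDependentSequencesWithRelation(relationId, &identitySeqInfoList, attnum,
                                      DEPENDENCY_INTERNAL);
}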
@@ -146,6 +147,8 @@ extern void SyncDeleteColocationGroupToNodes(uint32 colocationId);
     "placementid = EXCLUDED.placementid"
 #define METADATA_SYNC_CHANNEL "metadata_sync"
 
+#define WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES \
+    "SELECT pg_catalog.worker_adjust_identity_column_seq_ranges(%s)"
 
 /* controlled via GUC */
 extern char *EnableManualMetadataChangesForUser;
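A sketch of how such a command macro is typically expanded before being shipped to workers during metadata sync (the call site and the argument value are illustrative assumptions, not code from this diff):

StringInfoData command;
initStringInfo(&command);

/* the %s placeholder receives a literal argument; the exact value the real
 * caller passes is an assumption here */
appendStringInfo(&command, WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES,
                 quote_literal_cstr("schema_name.table_name"));

/* command.data now holds the SELECT to run on each worker node */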
@@ -19,4 +19,9 @@ extern DestReceiver * CreateShardCopyDestReceiver(EState *executorState,
                                                   List *destinationShardFullyQualifiedName,
                                                   uint32_t destinationNodeId);
 
+extern const char * CopyableColumnNamesFromRelationName(const char *schemaName, const
+                                                        char *relationName);
+
+extern const char * CopyableColumnNamesFromTupleDesc(TupleDesc tupdesc);
+
 #endif /* WORKER_SHARD_COPY_H_ */
@@ -55,6 +55,14 @@ pg_strtoint64(char *s)
 }
 
+/*
+ * RelationGetSmgr got backported in 13.10 and 14.7 so redefining it for any
+ * version higher causes compilation errors due to redefining of the function.
+ * We want to use it in all versions. So we backport it ourselves in earlier
+ * versions, and rely on the Postgres provided version in the later versions.
+ */
+#if PG_VERSION_NUM >= PG_VERSION_13 && PG_VERSION_NUM < 130010 \
+    || PG_VERSION_NUM >= PG_VERSION_14 && PG_VERSION_NUM < 140007
 static inline SMgrRelation
 RelationGetSmgr(Relation rel)
 {
@@ -66,6 +74,9 @@ RelationGetSmgr(Relation rel)
 }
 
+#endif
+
 #define CREATE_SEQUENCE_COMMAND \
     "CREATE SEQUENCE IF NOT EXISTS %s AS %s INCREMENT BY " INT64_FORMAT \
     " MINVALUE " INT64_FORMAT " MAXVALUE " INT64_FORMAT \
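The guard above is the general pattern for backporting an in-core helper: define the fallback only for server versions that predate it, so newer point releases pick up the in-core definition. A condensed sketch of the same idea, with the body modeled on the PostgreSQL 14 helper (assumes PostgreSQL headers that still use rd_node/rd_backend):

#if PG_VERSION_NUM < 130010
static inline SMgrRelation
RelationGetSmgr(Relation rel)
{
    /* open (or reuse) the smgr handle and tie its lifetime to the Relation */
    if (rel->rd_smgr == NULL)
        smgrsetowner(&(rel->rd_smgr), smgropen(rel->rd_node, rel->rd_backend));
    return rel->rd_smgr;
}
#endif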
@@ -253,7 +253,7 @@ s/pg_cancel_backend\('[0-9]+'::bigint\)/pg_cancel_backend('xxxxx'::bigint)/g
 s/issuing SELECT pg_cancel_backend\([0-9]+::integer\)/issuing SELECT pg_cancel_backend(xxxxx::integer)/g
 
 # shard_rebalancer output for flaky nodeIds
-s/issuing SELECT citus_copy_shard_placement\(43[0-9]+,[0-9]+,[0-9]+,'block_writes'\)/issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')/g
+s/issuing SELECT pg_catalog.citus_copy_shard_placement\(43[0-9]+,[0-9]+,[0-9]+,'block_writes'\)/issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')/g
 
 # node id in run_command_on_all_nodes warning
 s/Error on node with node id [0-9]+/Error on node with node id xxxxx/g
@@ -10,7 +10,6 @@ test: isolation_move_placement_vs_modification
 test: isolation_move_placement_vs_modification_fk
 test: isolation_tenant_isolation_with_fkey_to_reference
 test: isolation_ref2ref_foreign_keys_enterprise
-test: isolation_pg_send_cancellation
 test: isolation_shard_move_vs_start_metadata_sync
 test: isolation_tenant_isolation
 test: isolation_tenant_isolation_nonblocking
@@ -304,6 +304,57 @@ SELECT public.wait_until_metadata_sync(30000);
 
 (1 row)
 
+-- make sure a non-super user can rebalance when there are reference tables to replicate
+CREATE TABLE ref_table(a int primary key);
+SELECT create_reference_table('ref_table');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- add a new node to trigger replicate_reference_tables task
+SELECT 1 FROM citus_set_coordinator_host('localhost');
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SET ROLE non_super_user_rebalance;
+SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'force_logical');
+NOTICE: Scheduled 1 moves as job xxx
+DETAIL: Rebalance scheduled as background job
+HINT: To monitor progress, run: SELECT * FROM citus_rebalance_status();
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+-- wait for success
+SELECT citus_rebalance_wait();
+ citus_rebalance_wait
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT state, details from citus_rebalance_status();
+ state | details
+---------------------------------------------------------------------
+ finished | {"tasks": [], "task_state_counts": {"done": 2}}
+(1 row)
+
+RESET ROLE;
 SET client_min_messages TO WARNING;
 DROP SCHEMA background_rebalance CASCADE;
 DROP USER non_super_user_rebalance;
+SELECT 1 FROM citus_remove_node('localhost', :master_port);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
@@ -60,7 +60,7 @@ SELECT create_reference_table('reference_table');
 
 (1 row)
 
-CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY);
+CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY, genid integer GENERATED ALWAYS AS ( measureid + 3 ) stored, value varchar(44), col_todrop integer);
 CLUSTER colocated_dist_table USING colocated_dist_table_pkey;
 SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors');
 create_distributed_table
@@ -84,8 +84,9 @@ ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) RE
 -- END : Create Foreign key constraints.
 -- BEGIN : Load data into tables.
 INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
-INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
+INSERT INTO colocated_dist_table(measureid, value, col_todrop) SELECT i,'Value',i FROM generate_series(0,1000)i;
 INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
+ALTER TABLE colocated_dist_table DROP COLUMN col_todrop;
 SELECT COUNT(*) FROM sensors;
 count
---------------------------------------------------------------------
@@ -56,7 +56,7 @@ SELECT create_reference_table('reference_table');
 
 (1 row)
 
-CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY);
+CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY, genid integer GENERATED ALWAYS AS ( measureid + 3 ) stored, value varchar(44), col_todrop integer);
 CLUSTER colocated_dist_table USING colocated_dist_table_pkey;
 SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors');
 create_distributed_table
@@ -80,8 +80,9 @@ ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) RE
 -- END : Create Foreign key constraints.
 -- BEGIN : Load data into tables.
 INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
-INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
+INSERT INTO colocated_dist_table(measureid, value, col_todrop) SELECT i,'Value',i FROM generate_series(0,1000)i;
 INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
+ALTER TABLE colocated_dist_table DROP COLUMN col_todrop;
 SELECT COUNT(*) FROM sensors;
 count
---------------------------------------------------------------------
@@ -64,11 +64,11 @@ SET citus.multi_shard_modify_mode TO sequential;
 SELECT citus_update_table_statistics('test_table_statistics_hash');
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT 981000 AS shard_id, 'public.test_table_statistics_hash_981000' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, 'public.test_table_statistics_hash_981001' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, 'public.test_table_statistics_hash_981002' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, 'public.test_table_statistics_hash_981003' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 'public.test_table_statistics_hash_981004' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, 'public.test_table_statistics_hash_981005' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, 'public.test_table_statistics_hash_981006' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, 'public.test_table_statistics_hash_981007' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
+NOTICE: issuing SELECT 981000 AS shard_id, 'public.test_table_statistics_hash_981000' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, 'public.test_table_statistics_hash_981001' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, 'public.test_table_statistics_hash_981002' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, 'public.test_table_statistics_hash_981003' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 'public.test_table_statistics_hash_981004' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, 'public.test_table_statistics_hash_981005' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, 'public.test_table_statistics_hash_981006' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, 'public.test_table_statistics_hash_981007' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT 981000 AS shard_id, 'public.test_table_statistics_hash_981000' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, 'public.test_table_statistics_hash_981001' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, 'public.test_table_statistics_hash_981002' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, 'public.test_table_statistics_hash_981003' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 'public.test_table_statistics_hash_981004' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, 'public.test_table_statistics_hash_981005' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, 'public.test_table_statistics_hash_981006' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, 'public.test_table_statistics_hash_981007' AS shard_name, pg_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
+NOTICE: issuing SELECT 981000 AS shard_id, 'public.test_table_statistics_hash_981000' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, 'public.test_table_statistics_hash_981001' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, 'public.test_table_statistics_hash_981002' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, 'public.test_table_statistics_hash_981003' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 'public.test_table_statistics_hash_981004' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, 'public.test_table_statistics_hash_981005' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, 'public.test_table_statistics_hash_981006' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, 'public.test_table_statistics_hash_981007' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -152,11 +152,11 @@ SET citus.multi_shard_modify_mode TO sequential;
 SELECT citus_update_table_statistics('test_table_statistics_append');
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT 981008 AS shard_id, 'public.test_table_statistics_append_981008' AS shard_name, pg_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, 'public.test_table_statistics_append_981009' AS shard_name, pg_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
+NOTICE: issuing SELECT 981008 AS shard_id, 'public.test_table_statistics_append_981008' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, 'public.test_table_statistics_append_981009' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT 981008 AS shard_id, 'public.test_table_statistics_append_981008' AS shard_name, pg_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, 'public.test_table_statistics_append_981009' AS shard_name, pg_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
+NOTICE: issuing SELECT 981008 AS shard_id, 'public.test_table_statistics_append_981008' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, 'public.test_table_statistics_append_981009' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -36,6 +36,19 @@ set citus.shard_replication_factor to 2;
 select create_distributed_table_concurrently('test','key', 'hash');
 ERROR: cannot distribute a table concurrently when citus.shard_replication_factor > 1
 set citus.shard_replication_factor to 1;
+set citus.shard_replication_factor to 2;
+create table dist_1(a int);
+select create_distributed_table('dist_1', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+set citus.shard_replication_factor to 1;
+create table dist_2(a int);
+select create_distributed_table_concurrently('dist_2', 'a', colocate_with=>'dist_1');
+ERROR: cannot create distributed table concurrently because Citus allows concurrent table distribution only when citus.shard_replication_factor = 1
+HINT: table dist_2 is requested to be colocated with dist_1 which has citus.shard_replication_factor > 1
 begin;
 select create_distributed_table_concurrently('test','key');
 ERROR: create_distributed_table_concurrently cannot run inside a transaction block
@@ -138,27 +151,8 @@ select count(*) from test;
 rollback;
 -- verify that we can undistribute the table
 begin;
+set local client_min_messages to warning;
 select undistribute_table('test', cascade_via_foreign_keys := true);
-NOTICE: converting the partitions of create_distributed_table_concurrently.test
-NOTICE: creating a new table for create_distributed_table_concurrently.test
-NOTICE: dropping the old create_distributed_table_concurrently.test
-NOTICE: renaming the new table to create_distributed_table_concurrently.test
-NOTICE: creating a new table for create_distributed_table_concurrently.ref
-NOTICE: moving the data of create_distributed_table_concurrently.ref
-NOTICE: dropping the old create_distributed_table_concurrently.ref
-NOTICE: drop cascades to constraint test_id_fkey_1190041 on table create_distributed_table_concurrently.test_1190041
-CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
-PL/pgSQL function citus_drop_trigger() line XX at PERFORM
-SQL statement "DROP TABLE create_distributed_table_concurrently.ref CASCADE"
-NOTICE: renaming the new table to create_distributed_table_concurrently.ref
-NOTICE: creating a new table for create_distributed_table_concurrently.test_1
-NOTICE: moving the data of create_distributed_table_concurrently.test_1
-NOTICE: dropping the old create_distributed_table_concurrently.test_1
-NOTICE: renaming the new table to create_distributed_table_concurrently.test_1
-NOTICE: creating a new table for create_distributed_table_concurrently.test_2
-NOTICE: moving the data of create_distributed_table_concurrently.test_2
-NOTICE: dropping the old create_distributed_table_concurrently.test_2
-NOTICE: renaming the new table to create_distributed_table_concurrently.test_2
 undistribute_table
---------------------------------------------------------------------
@@ -245,7 +239,7 @@ insert into dist_table4 select s from generate_series(1,100) s;
 select count(*) as total from dist_table4;
 total
---------------------------------------------------------------------
 100
 (1 row)
 
 -- verify we do not allow foreign keys from distributed table to citus local table concurrently
@@ -295,13 +289,13 @@ select count(*) from test_columnar;
 select id from test_columnar where id = 1;
 id
---------------------------------------------------------------------
 1
 (1 row)
 
 select id from test_columnar where id = 51;
 id
---------------------------------------------------------------------
 51
 (1 row)
 
 select count(*) from test_columnar_1;
@@ -0,0 +1,47 @@
+CREATE SCHEMA failure_local_modification;
+SET search_path TO failure_local_modification;
+SET citus.next_shard_id TO 1989000;
+SET citus.shard_replication_factor TO 1;
+CREATE TABLE failover_to_local (key int PRIMARY KEY, value varchar(10));
+SELECT create_reference_table('failover_to_local');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+\c - - - :worker_2_port
+SET search_path TO failure_local_modification;
+-- prevent local connection establishment, imitate
+-- a failure
+ALTER SYSTEM SET citus.local_shared_pool_size TO -1;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_sleep(0.2);
+ pg_sleep
+---------------------------------------------------------------------
+
+(1 row)
+
+BEGIN;
+-- we force the execution to use connections (e.g., remote execution)
+-- however, we do not allow connections as local_shared_pool_size=-1
+-- so, properly error out
+SET LOCAL citus.enable_local_execution TO false;
+INSERT INTO failover_to_local VALUES (1,'1'), (2,'2'),(3,'3'),(4,'4');
+ERROR: the total number of connections on the server is more than max_connections(100)
+HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true;
+ROLLBACK;
+ALTER SYSTEM RESET citus.local_shared_pool_size;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+SET client_min_messages TO ERROR;
+DROP SCHEMA failure_local_modification cascade;
@@ -210,6 +210,7 @@ select create_distributed_table('partitioned_tbl_with_fkey','x');
 
 create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31');
 create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31');
+create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31');
 insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s;
 ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id);
 WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'partitioned_tbl_with_fkey'::regclass ORDER BY shardid LIMIT 1)
@@ -1,525 +1,431 @@
+-- This test file has an alternative output because of error messages vary for PG13
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13;
+ server_version_le_13
+---------------------------------------------------------------------
+ f
+(1 row)
+
 CREATE SCHEMA generated_identities;
 SET search_path TO generated_identities;
 SET client_min_messages to ERROR;
+SET citus.shard_replication_factor TO 1;
 SELECT 1 from citus_add_node('localhost', :master_port, groupId=>0);
 ?column?
---------------------------------------------------------------------
 1
 (1 row)
 
-DROP TABLE IF EXISTS generated_identities_test;
--- create a partitioned table for testing.
-CREATE TABLE generated_identities_test (
-    a int CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY,
-    b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10),
-    c smallint GENERATED BY DEFAULT AS IDENTITY,
-    d serial,
-    e bigserial,
-    f smallserial,
-    g int
-)
-PARTITION BY RANGE (a);
-CREATE TABLE generated_identities_test_1_5 PARTITION OF generated_identities_test FOR VALUES FROM (1) TO (5);
-CREATE TABLE generated_identities_test_5_50 PARTITION OF generated_identities_test FOR VALUES FROM (5) TO (50);
--- local tables
-SELECT citus_add_local_table_to_metadata('generated_identities_test');
+-- smallint identity column can not be distributed
+CREATE TABLE smallint_identity_column (
+    a smallint GENERATED BY DEFAULT AS IDENTITY
+);
+SELECT create_distributed_table('smallint_identity_column', 'a');
+ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column
+HINT: Use bigint identity column instead.
+SELECT create_distributed_table_concurrently('smallint_identity_column', 'a');
+ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column
+HINT: Use bigint identity column instead.
+SELECT create_reference_table('smallint_identity_column');
+ERROR: cannot complete operation on a table with identity column
+SELECT citus_add_local_table_to_metadata('smallint_identity_column');
 citus_add_local_table_to_metadata
---------------------------------------------------------------------
 
 (1 row)
 
-\d generated_identities_test
-Partitioned table "generated_identities.generated_identities_test"
-Column | Type | Collation | Nullable | Default
----------------------------------------------------------------------
-a | integer | | not null | generated by default as identity
-b | bigint | | not null | generated always as identity
-c | smallint | | not null | generated by default as identity
-d | integer | | not null | nextval('generated_identities_test_d_seq'::regclass)
-e | bigint | | not null | nextval('generated_identities_test_e_seq'::regclass)
-f | smallint | | not null | nextval('generated_identities_test_f_seq'::regclass)
-g | integer | | |
-Partition key: RANGE (a)
-Number of partitions: 2 (Use \d+ to list them.)
-
-\c - - - :worker_1_port
-\d generated_identities.generated_identities_test
-Partitioned table "generated_identities.generated_identities_test"
-Column | Type | Collation | Nullable | Default
----------------------------------------------------------------------
-a | integer | | not null | worker_nextval('generated_identities.generated_identities_test_a_seq'::regclass)
-b | bigint | | not null | nextval('generated_identities.generated_identities_test_b_seq'::regclass)
-c | smallint | | not null | worker_nextval('generated_identities.generated_identities_test_c_seq'::regclass)
-d | integer | | not null | worker_nextval('generated_identities.generated_identities_test_d_seq'::regclass)
-e | bigint | | not null | nextval('generated_identities.generated_identities_test_e_seq'::regclass)
-f | smallint | | not null | worker_nextval('generated_identities.generated_identities_test_f_seq'::regclass)
-g | integer | | |
-Partition key: RANGE (a)
-Number of partitions: 2 (Use \d+ to list them.)
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-SELECT undistribute_table('generated_identities_test');
-undistribute_table
+DROP TABLE smallint_identity_column;
+-- int identity column can not be distributed
+CREATE TABLE int_identity_column (
+    a int GENERATED BY DEFAULT AS IDENTITY
+);
+SELECT create_distributed_table('int_identity_column', 'a');
+ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column
+HINT: Use bigint identity column instead.
+SELECT create_distributed_table_concurrently('int_identity_column', 'a');
+ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column
+HINT: Use bigint identity column instead.
+SELECT create_reference_table('int_identity_column');
+ERROR: cannot complete operation on a table with identity column
+SELECT citus_add_local_table_to_metadata('int_identity_column');
+ citus_add_local_table_to_metadata
---------------------------------------------------------------------
 
 (1 row)
 
-SELECT citus_remove_node('localhost', :master_port);
-citus_remove_node
+DROP TABLE int_identity_column;
+RESET citus.shard_replication_factor;
+CREATE TABLE bigint_identity_column (
+    a bigint GENERATED BY DEFAULT AS IDENTITY,
+    b int
+);
+SELECT citus_add_local_table_to_metadata('bigint_identity_column');
+ citus_add_local_table_to_metadata
---------------------------------------------------------------------
 
 (1 row)
 
-SELECT create_distributed_table('generated_identities_test', 'a');
+DROP TABLE bigint_identity_column;
+CREATE TABLE bigint_identity_column (
+    a bigint GENERATED BY DEFAULT AS IDENTITY,
+    b int
+);
+SELECT create_distributed_table('bigint_identity_column', 'a');
 create_distributed_table
---------------------------------------------------------------------
 
 (1 row)
 
-\d generated_identities_test
-Partitioned table "generated_identities.generated_identities_test"
+\d bigint_identity_column
+Table "generated_identities.bigint_identity_column"
 Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
-a | integer | | not null | generated by default as identity
-b | bigint | | not null | generated always as identity
-c | smallint | | not null | generated by default as identity
-d | integer | | not null | nextval('generated_identities_test_d_seq'::regclass)
-e | bigint | | not null | nextval('generated_identities_test_e_seq'::regclass)
-f | smallint | | not null | nextval('generated_identities_test_f_seq'::regclass)
-g | integer | | |
-Partition key: RANGE (a)
-Number of partitions: 2 (Use \d+ to list them.)
+a | bigint | | not null | generated by default as identity
+b | integer | | |
 
 \c - - - :worker_1_port
-\d generated_identities.generated_identities_test
-Partitioned table "generated_identities.generated_identities_test"
-Column | Type | Collation | Nullable | Default
+SET search_path TO generated_identities;
+SET client_min_messages to ERROR;
+INSERT INTO bigint_identity_column (b)
+SELECT s FROM generate_series(1,10) s;
+\d generated_identities.bigint_identity_column
+Table "generated_identities.bigint_identity_column"
+Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
-a | integer | | not null | worker_nextval('generated_identities.generated_identities_test_a_seq'::regclass)
-b | bigint | | not null | nextval('generated_identities.generated_identities_test_b_seq'::regclass)
-c | smallint | | not null | worker_nextval('generated_identities.generated_identities_test_c_seq'::regclass)
-d | integer | | not null | worker_nextval('generated_identities.generated_identities_test_d_seq'::regclass)
-e | bigint | | not null | nextval('generated_identities.generated_identities_test_e_seq'::regclass)
-f | smallint | | not null | worker_nextval('generated_identities.generated_identities_test_f_seq'::regclass)
-g | integer | | |
-Partition key: RANGE (a)
-Number of partitions: 2 (Use \d+ to list them.)
+a | bigint | | not null | generated by default as identity
+b | integer | | |
 
 \c - - - :master_port
 SET search_path TO generated_identities;
 SET client_min_messages to ERROR;
-insert into generated_identities_test (g) values (1);
-insert into generated_identities_test (g) SELECT 2;
-INSERT INTO generated_identities_test (g)
+INSERT INTO bigint_identity_column (b)
+SELECT s FROM generate_series(11,20) s;
+SELECT * FROM bigint_identity_column ORDER BY B ASC;
+ a | b
+---------------------------------------------------------------------
+ 3940649673949185 | 1
+ 3940649673949186 | 2
+ 3940649673949187 | 3
+ 3940649673949188 | 4
+ 3940649673949189 | 5
+ 3940649673949190 | 6
+ 3940649673949191 | 7
+ 3940649673949192 | 8
+ 3940649673949193 | 9
+ 3940649673949194 | 10
+ 1 | 11
+ 2 | 12
+ 3 | 13
+ 4 | 14
+ 5 | 15
+ 6 | 16
+ 7 | 17
+ 8 | 18
+ 9 | 19
+ 10 | 20
+(20 rows)
+
+-- table with identity column cannot be altered.
+SELECT alter_distributed_table('bigint_identity_column', 'b');
+ERROR: cannot complete operation on a table with identity column
+-- table with identity column cannot be undistributed.
+SELECT undistribute_table('bigint_identity_column');
+ERROR: cannot complete operation on a table with identity column
+DROP TABLE bigint_identity_column;
+-- create a partitioned table for testing.
+CREATE TABLE partitioned_table (
+    a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
+    b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10),
+    c int
+)
+PARTITION BY RANGE (c);
+CREATE TABLE partitioned_table_1_50 PARTITION OF partitioned_table FOR VALUES FROM (1) TO (50);
+CREATE TABLE partitioned_table_50_500 PARTITION OF partitioned_table FOR VALUES FROM (50) TO (1000);
+SELECT create_distributed_table('partitioned_table', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+\d partitioned_table
+Partitioned table "generated_identities.partitioned_table"
+Column | Type | Collation | Nullable | Default
+---------------------------------------------------------------------
+ a | bigint | | not null | generated by default as identity
+ b | bigint | | not null | generated always as identity
+ c | integer | | |
+Partition key: RANGE (c)
+Number of partitions: 2 (Use \d+ to list them.)
+
+\c - - - :worker_1_port
+SET search_path TO generated_identities;
+SET client_min_messages to ERROR;
+\d generated_identities.partitioned_table
+Partitioned table "generated_identities.partitioned_table"
+Column | Type | Collation | Nullable | Default
+---------------------------------------------------------------------
+ a | bigint | | not null | generated by default as identity
+ b | bigint | | not null | generated always as identity
+ c | integer | | |
+Partition key: RANGE (c)
+Number of partitions: 2 (Use \d+ to list them.)
+
+insert into partitioned_table (c) values (1);
+insert into partitioned_table (c) SELECT 2;
+INSERT INTO partitioned_table (c)
 SELECT s FROM generate_series(3,7) s;
-SELECT * FROM generated_identities_test ORDER BY 1;
- a | b | c | d | e | f | g
----------------------------------------------------------------------
- 1 | 10 | 1 | 1 | 1 | 1 | 1
- 2 | 20 | 2 | 2 | 2 | 2 | 2
- 3 | 30 | 3 | 3 | 3 | 3 | 3
- 4 | 40 | 4 | 4 | 4 | 4 | 4
- 5 | 50 | 5 | 5 | 5 | 5 | 5
- 6 | 60 | 6 | 6 | 6 | 6 | 6
- 7 | 70 | 7 | 7 | 7 | 7 | 7
-(7 rows)
-
-SELECT undistribute_table('generated_identities_test');
- undistribute_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT * FROM generated_identities_test ORDER BY 1;
- a | b | c | d | e | f | g
----------------------------------------------------------------------
- 1 | 10 | 1 | 1 | 1 | 1 | 1
- 2 | 20 | 2 | 2 | 2 | 2 | 2
- 3 | 30 | 3 | 3 | 3 | 3 | 3
- 4 | 40 | 4 | 4 | 4 | 4 | 4
- 5 | 50 | 5 | 5 | 5 | 5 | 5
- 6 | 60 | 6 | 6 | 6 | 6 | 6
- 7 | 70 | 7 | 7 | 7 | 7 | 7
-(7 rows)
-
-\d generated_identities_test
-Partitioned table "generated_identities.generated_identities_test"
-Column | Type | Collation | Nullable | Default
----------------------------------------------------------------------
-a | integer | | not null | generated by default as identity
-b | bigint | | not null | generated always as identity
-c | smallint | | not null | generated by default as identity
-d | integer | | not null | nextval('generated_identities_test_d_seq'::regclass)
-e | bigint | | not null | nextval('generated_identities_test_e_seq'::regclass)
-f | smallint | | not null | nextval('generated_identities_test_f_seq'::regclass)
-g | integer | | |
-Partition key: RANGE (a)
-Number of partitions: 2 (Use \d+ to list them.)
-
-\c - - - :worker_1_port
-\d generated_identities.generated_identities_test
 \c - - - :master_port
 SET search_path TO generated_identities;
 SET client_min_messages to ERROR;
-INSERT INTO generated_identities_test (g)
-SELECT s FROM generate_series(8,10) s;
-SELECT * FROM generated_identities_test ORDER BY 1;
- a | b | c | d | e | f | g
+INSERT INTO partitioned_table (c)
+SELECT s FROM generate_series(10,20) s;
+INSERT INTO partitioned_table (a,c) VALUES (998,998);
+INSERT INTO partitioned_table (a,b,c) OVERRIDING SYSTEM VALUE VALUES (999,999,999);
+SELECT * FROM partitioned_table ORDER BY c ASC;
+ a | b | c
---------------------------------------------------------------------
-1 | 10 | 1 | 1 | 1 | 1 | 1
-2 | 20 | 2 | 2 | 2 | 2 | 2
-3 | 30 | 3 | 3 | 3 | 3 | 3
-4 | 40 | 4 | 4 | 4 | 4 | 4
-5 | 50 | 5 | 5 | 5 | 5 | 5
-6 | 60 | 6 | 6 | 6 | 6 | 6
-7 | 70 | 7 | 7 | 7 | 7 | 7
-8 | 80 | 8 | 8 | 8 | 8 | 8
-9 | 90 | 9 | 9 | 9 | 9 | 9
-10 | 100 | 10 | 10 | 10 | 10 | 10
-(10 rows)
-
--- distributed table
-SELECT create_distributed_table('generated_identities_test', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
+ 3940649673949185 | 3940649673949185 | 1
+ 3940649673949195 | 3940649673949195 | 2
+ 3940649673949205 | 3940649673949205 | 3
+ 3940649673949215 | 3940649673949215 | 4
+ 3940649673949225 | 3940649673949225 | 5
+ 3940649673949235 | 3940649673949235 | 6
+ 3940649673949245 | 3940649673949245 | 7
+ 10 | 10 | 10
+ 20 | 20 | 11
+ 30 | 30 | 12
+ 40 | 40 | 13
+ 50 | 50 | 14
+ 60 | 60 | 15
+ 70 | 70 | 16
+ 80 | 80 | 17
+ 90 | 90 | 18
+ 100 | 100 | 19
+ 110 | 110 | 20
+ 998 | 120 | 998
+ 999 | 999 | 999
+(20 rows)
 
 -- alter table .. alter column .. add is unsupported
-ALTER TABLE generated_identities_test ALTER COLUMN g ADD GENERATED ALWAYS AS IDENTITY;
+ALTER TABLE partitioned_table ALTER COLUMN g ADD GENERATED ALWAYS AS IDENTITY;
 ERROR: alter table command is currently unsupported
 DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
 -- alter table .. alter column is unsupported
-ALTER TABLE generated_identities_test ALTER COLUMN b TYPE int;
+ALTER TABLE partitioned_table ALTER COLUMN b TYPE int;
 ERROR: cannot execute ALTER COLUMN command involving identity column
-SELECT alter_distributed_table('generated_identities_test', 'g');
- alter_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT alter_distributed_table('generated_identities_test', 'b');
- alter_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT alter_distributed_table('generated_identities_test', 'c');
- alter_distributed_table
----------------------------------------------------------------------
+DROP TABLE partitioned_table;
+-- create a table for reference table testing.
+CREATE TABLE reference_table (
+    a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
+    b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10) UNIQUE,
+    c int
|
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
SELECT undistribute_table('generated_identities_test');
|
|
||||||
undistribute_table
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
SELECT * FROM generated_identities_test ORDER BY g;
|
|
||||||
a | b | c | d | e | f | g
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
1 | 10 | 1 | 1 | 1 | 1 | 1
|
|
||||||
2 | 20 | 2 | 2 | 2 | 2 | 2
|
|
||||||
3 | 30 | 3 | 3 | 3 | 3 | 3
|
|
||||||
4 | 40 | 4 | 4 | 4 | 4 | 4
|
|
||||||
5 | 50 | 5 | 5 | 5 | 5 | 5
|
|
||||||
6 | 60 | 6 | 6 | 6 | 6 | 6
|
|
||||||
7 | 70 | 7 | 7 | 7 | 7 | 7
|
|
||||||
8 | 80 | 8 | 8 | 8 | 8 | 8
|
|
||||||
9 | 90 | 9 | 9 | 9 | 9 | 9
|
|
||||||
10 | 100 | 10 | 10 | 10 | 10 | 10
|
|
||||||
(10 rows)
|
|
||||||
|
|
||||||
-- reference table
|
|
||||||
DROP TABLE generated_identities_test;
|
|
||||||
CREATE TABLE generated_identities_test (
|
|
||||||
a int GENERATED BY DEFAULT AS IDENTITY,
|
|
||||||
b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10),
|
|
||||||
c smallint GENERATED BY DEFAULT AS IDENTITY,
|
|
||||||
d serial,
|
|
||||||
e bigserial,
|
|
||||||
f smallserial,
|
|
||||||
g int
|
|
||||||
);
|
);
|
||||||
SELECT create_reference_table('generated_identities_test');
|
SELECT create_reference_table('reference_table');
|
||||||
create_reference_table
|
create_reference_table
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
\d generated_identities_test
|
\d reference_table
|
||||||
Table "generated_identities.generated_identities_test"
|
Table "generated_identities.reference_table"
|
||||||
Column | Type | Collation | Nullable | Default
|
Column | Type | Collation | Nullable | Default
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
a | integer | | not null | generated by default as identity
|
a | bigint | | not null | generated by default as identity
|
||||||
b | bigint | | not null | generated always as identity
|
b | bigint | | not null | generated always as identity
|
||||||
c | smallint | | not null | generated by default as identity
|
c | integer | | |
|
||||||
d | integer | | not null | nextval('generated_identities_test_d_seq'::regclass)
|
Indexes:
|
||||||
e | bigint | | not null | nextval('generated_identities_test_e_seq'::regclass)
|
"reference_table_b_key" UNIQUE CONSTRAINT, btree (b)
|
||||||
f | smallint | | not null | nextval('generated_identities_test_f_seq'::regclass)
|
|
||||||
g | integer | | |
|
|
||||||
|
|
||||||
\c - - - :worker_1_port
|
\c - - - :worker_1_port
|
||||||
\d generated_identities.generated_identities_test
|
SET search_path TO generated_identities;
|
||||||
Table "generated_identities.generated_identities_test"
|
\d generated_identities.reference_table
|
||||||
Column | Type | Collation | Nullable | Default
|
Table "generated_identities.reference_table"
|
||||||
|
Column | Type | Collation | Nullable | Default
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
a | integer | | not null | worker_nextval('generated_identities.generated_identities_test_a_seq'::regclass)
|
a | bigint | | not null | generated by default as identity
|
||||||
b | bigint | | not null | nextval('generated_identities.generated_identities_test_b_seq'::regclass)
|
b | bigint | | not null | generated always as identity
|
||||||
c | smallint | | not null | worker_nextval('generated_identities.generated_identities_test_c_seq'::regclass)
|
c | integer | | |
|
||||||
d | integer | | not null | worker_nextval('generated_identities.generated_identities_test_d_seq'::regclass)
|
Indexes:
|
||||||
e | bigint | | not null | nextval('generated_identities.generated_identities_test_e_seq'::regclass)
|
"reference_table_b_key" UNIQUE CONSTRAINT, btree (b)
|
||||||
f | smallint | | not null | worker_nextval('generated_identities.generated_identities_test_f_seq'::regclass)
|
|
||||||
g | integer | | |
|
INSERT INTO reference_table (c)
|
||||||
|
SELECT s FROM generate_series(1,10) s;
|
||||||
|
--on master
|
||||||
|
select * from reference_table;
|
||||||
|
a | b | c
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
3940649673949185 | 3940649673949185 | 1
|
||||||
|
3940649673949195 | 3940649673949195 | 2
|
||||||
|
3940649673949205 | 3940649673949205 | 3
|
||||||
|
3940649673949215 | 3940649673949215 | 4
|
||||||
|
3940649673949225 | 3940649673949225 | 5
|
||||||
|
3940649673949235 | 3940649673949235 | 6
|
||||||
|
3940649673949245 | 3940649673949245 | 7
|
||||||
|
3940649673949255 | 3940649673949255 | 8
|
||||||
|
3940649673949265 | 3940649673949265 | 9
|
||||||
|
3940649673949275 | 3940649673949275 | 10
|
||||||
|
(10 rows)
|
||||||
|
|
||||||
\c - - - :master_port
|
\c - - - :master_port
|
||||||
SET search_path TO generated_identities;
|
SET search_path TO generated_identities;
|
||||||
SET client_min_messages to ERROR;
|
SET client_min_messages to ERROR;
|
||||||
INSERT INTO generated_identities_test (g)
|
INSERT INTO reference_table (c)
|
||||||
SELECT s FROM generate_series(11,20) s;
|
SELECT s FROM generate_series(11,20) s;
|
||||||
SELECT * FROM generated_identities_test ORDER BY g;
|
SELECT * FROM reference_table ORDER BY c ASC;
|
||||||
a | b | c | d | e | f | g
|
a | b | c
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
1 | 10 | 1 | 1 | 1 | 1 | 11
|
3940649673949185 | 3940649673949185 | 1
|
||||||
2 | 20 | 2 | 2 | 2 | 2 | 12
|
3940649673949195 | 3940649673949195 | 2
|
||||||
3 | 30 | 3 | 3 | 3 | 3 | 13
|
3940649673949205 | 3940649673949205 | 3
|
||||||
4 | 40 | 4 | 4 | 4 | 4 | 14
|
3940649673949215 | 3940649673949215 | 4
|
||||||
5 | 50 | 5 | 5 | 5 | 5 | 15
|
3940649673949225 | 3940649673949225 | 5
|
||||||
6 | 60 | 6 | 6 | 6 | 6 | 16
|
3940649673949235 | 3940649673949235 | 6
|
||||||
7 | 70 | 7 | 7 | 7 | 7 | 17
|
3940649673949245 | 3940649673949245 | 7
|
||||||
8 | 80 | 8 | 8 | 8 | 8 | 18
|
3940649673949255 | 3940649673949255 | 8
|
||||||
9 | 90 | 9 | 9 | 9 | 9 | 19
|
3940649673949265 | 3940649673949265 | 9
|
||||||
10 | 100 | 10 | 10 | 10 | 10 | 20
|
3940649673949275 | 3940649673949275 | 10
|
||||||
(10 rows)
|
10 | 10 | 11
|
||||||
|
20 | 20 | 12
|
||||||
|
30 | 30 | 13
|
||||||
|
40 | 40 | 14
|
||||||
|
50 | 50 | 15
|
||||||
|
60 | 60 | 16
|
||||||
|
70 | 70 | 17
|
||||||
|
80 | 80 | 18
|
||||||
|
90 | 90 | 19
|
||||||
|
100 | 100 | 20
|
||||||
|
(20 rows)
|
||||||
|
|
||||||
SELECT undistribute_table('generated_identities_test');
|
DROP TABLE reference_table;
|
||||||
undistribute_table
|
CREATE TABLE color (
|
||||||
|
color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
|
||||||
|
color_name VARCHAR NOT NULL
|
||||||
|
);
|
||||||
|
-- https://github.com/citusdata/citus/issues/6694
|
||||||
|
CREATE USER identity_test_user;
|
||||||
|
GRANT INSERT ON color TO identity_test_user;
|
||||||
|
GRANT USAGE ON SCHEMA generated_identities TO identity_test_user;
|
||||||
|
SET ROLE identity_test_user;
|
||||||
|
SELECT create_distributed_table('color', 'color_id');
|
||||||
|
ERROR: must be owner of table color
|
||||||
|
SET ROLE postgres;
|
||||||
|
SET citus.shard_replication_factor TO 1;
|
||||||
|
SELECT create_distributed_table_concurrently('color', 'color_id');
|
||||||
|
create_distributed_table_concurrently
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
\d generated_identities_test
|
RESET citus.shard_replication_factor;
|
||||||
Table "generated_identities.generated_identities_test"
|
\c - identity_test_user - :worker_1_port
|
||||||
Column | Type | Collation | Nullable | Default
|
SET search_path TO generated_identities;
|
||||||
|
SET client_min_messages to ERROR;
|
||||||
|
INSERT INTO color(color_name) VALUES ('Blue');
|
||||||
|
\c - postgres - :master_port
|
||||||
|
SET search_path TO generated_identities;
|
||||||
|
SET client_min_messages to ERROR;
|
||||||
|
SET citus.next_shard_id TO 12400000;
|
||||||
|
DROP TABLE Color;
|
||||||
|
CREATE TABLE color (
|
||||||
|
color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
|
||||||
|
color_name VARCHAR NOT NULL
|
||||||
|
) USING columnar;
|
||||||
|
SELECT create_distributed_table('color', 'color_id');
|
||||||
|
create_distributed_table
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
a | integer | | not null | generated by default as identity
|
|
||||||
b | bigint | | not null | generated always as identity
|
(1 row)
|
||||||
c | smallint | | not null | generated by default as identity
|
|
||||||
d | integer | | not null | nextval('generated_identities_test_d_seq'::regclass)
|
INSERT INTO color(color_name) VALUES ('Blue');
|
||||||
e | bigint | | not null | nextval('generated_identities_test_e_seq'::regclass)
|
\d+ color
|
||||||
f | smallint | | not null | nextval('generated_identities_test_f_seq'::regclass)
|
Table "generated_identities.color"
|
||||||
g | integer | | |
|
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
color_id | bigint | | not null | generated always as identity | plain | |
|
||||||
|
color_name | character varying | | not null | | extended | |
|
||||||
|
Indexes:
|
||||||
|
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)
|
||||||
|
|
||||||
\c - - - :worker_1_port
|
\c - - - :worker_1_port
|
||||||
\d generated_identities.generated_identities_test
|
SET search_path TO generated_identities;
|
||||||
\c - - - :master_port
|
\d+ color
|
||||||
|
Table "generated_identities.color"
|
||||||
|
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
|
||||||
|
---------------------------------------------------------------------
|
||||||
|
color_id | bigint | | not null | generated always as identity | plain | |
|
||||||
|
color_name | character varying | | not null | | extended | |
|
||||||
|
Indexes:
|
||||||
|
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)
|
||||||
|
|
||||||
|
INSERT INTO color(color_name) VALUES ('Red');
|
||||||
|
-- alter sequence .. restart
|
||||||
|
ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
|
||||||
|
ERROR: Altering a distributed sequence is currently not supported.
|
||||||
|
-- override system value
|
||||||
|
INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
|
||||||
|
ERROR: cannot insert a non-DEFAULT value into column "color_id"
|
||||||
|
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
|
||||||
|
HINT: Use OVERRIDING SYSTEM VALUE to override.
|
||||||
|
INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
|
||||||
|
ERROR: cannot insert a non-DEFAULT value into column "color_id"
|
||||||
|
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
|
||||||
|
HINT: Use OVERRIDING SYSTEM VALUE to override.
|
||||||
|
INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
|
||||||
|
ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000"
|
||||||
|
DETAIL: Key (color_id)=(1) already exists.
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
-- update null or custom value
|
||||||
|
UPDATE color SET color_id = NULL;
|
||||||
|
ERROR: column "color_id" can only be updated to DEFAULT
|
||||||
|
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
|
||||||
|
UPDATE color SET color_id = 1;
|
||||||
|
ERROR: column "color_id" can only be updated to DEFAULT
|
||||||
|
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
|
||||||
|
\c - postgres - :master_port
|
||||||
SET search_path TO generated_identities;
|
SET search_path TO generated_identities;
|
||||||
SET client_min_messages to ERROR;
|
SET client_min_messages to ERROR;
|
||||||
-- alter table .. add column .. GENERATED .. AS IDENTITY
|
-- alter table .. add column .. GENERATED .. AS IDENTITY
|
||||||
DROP TABLE IF EXISTS color;
|
|
||||||
CREATE TABLE color (
|
|
||||||
color_name VARCHAR NOT NULL
|
|
||||||
);
|
|
||||||
SELECT create_distributed_table('color', 'color_name');
|
|
||||||
create_distributed_table
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
ALTER TABLE color ADD COLUMN color_id BIGINT GENERATED ALWAYS AS IDENTITY;
|
ALTER TABLE color ADD COLUMN color_id BIGINT GENERATED ALWAYS AS IDENTITY;
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers
|
||||||
ALTER TABLE color ADD COLUMN color_id_1 BIGINT GENERATED ALWAYS AS IDENTITY;
|
-- alter sequence .. restart
|
||||||
ERROR: Cannot add an identity column because the table is not empty
|
ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
|
||||||
DROP TABLE color;
|
ERROR: Altering a distributed sequence is currently not supported.
|
||||||
-- insert data from workers
|
-- override system value
|
||||||
CREATE TABLE color (
|
INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
|
||||||
color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
|
ERROR: cannot insert a non-DEFAULT value into column "color_id"
|
||||||
color_name VARCHAR NOT NULL
|
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
|
||||||
);
|
HINT: Use OVERRIDING SYSTEM VALUE to override.
|
||||||
SELECT create_distributed_table('color', 'color_id');
|
INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
|
||||||
|
ERROR: cannot insert a non-DEFAULT value into column "color_id"
|
||||||
|
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
|
||||||
|
HINT: Use OVERRIDING SYSTEM VALUE to override.
|
||||||
|
INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
|
||||||
|
ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000"
|
||||||
|
DETAIL: Key (color_id)=(1) already exists.
|
||||||
|
CONTEXT: while executing command on localhost:xxxxx
|
||||||
|
-- update null or custom value
|
||||||
|
UPDATE color SET color_id = NULL;
|
||||||
|
ERROR: column "color_id" can only be updated to DEFAULT
|
||||||
|
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
|
||||||
|
UPDATE color SET color_id = 1;
|
||||||
|
ERROR: column "color_id" can only be updated to DEFAULT
|
||||||
|
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
|
||||||
|
DROP TABLE IF EXISTS test;
|
||||||
|
CREATE TABLE test (x int, y int, z bigint generated by default as identity);
|
||||||
|
SELECT create_distributed_table('test', 'x', colocate_with := 'none');
|
||||||
create_distributed_table
|
create_distributed_table
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
|
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
\c - - - :worker_1_port
|
INSERT INTO test VALUES (1,2);
|
||||||
SET search_path TO generated_identities;
|
INSERT INTO test SELECT x, y FROM test WHERE x = 1;
|
||||||
SET client_min_messages to ERROR;
|
SELECT * FROM test;
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
x | y | z
|
||||||
\c - - - :master_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
SELECT undistribute_table('color');
|
|
||||||
undistribute_table
|
|
||||||
---------------------------------------------------------------------
|
---------------------------------------------------------------------
|
||||||
|
1 | 2 | 1
|
||||||
|
1 | 2 | 2
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
SELECT create_distributed_table('color', 'color_id');
|
|
||||||
create_distributed_table
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
\c - - - :worker_1_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
|
||||||
\c - - - :master_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
|
||||||
SELECT count(*) from color;
|
|
||||||
count
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
3
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
-- modify sequence & alter table
|
|
||||||
DROP TABLE color;
|
|
||||||
CREATE TABLE color (
|
|
||||||
color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
|
|
||||||
color_name VARCHAR NOT NULL
|
|
||||||
);
|
|
||||||
SELECT create_distributed_table('color', 'color_id');
|
|
||||||
create_distributed_table
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
\c - - - :worker_1_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
|
||||||
\c - - - :master_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
SELECT undistribute_table('color');
|
|
||||||
undistribute_table
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
ALTER SEQUENCE color_color_id_seq RENAME TO myseq;
|
|
||||||
SELECT create_distributed_table('color', 'color_id');
|
|
||||||
create_distributed_table
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
\ds+ myseq
|
|
||||||
List of relations
|
|
||||||
Schema | Name | Type | Owner | Persistence | Size | Description
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
generated_identities | myseq | sequence | postgres | permanent | 8192 bytes |
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
\ds+ color_color_id_seq
|
|
||||||
List of relations
|
|
||||||
Schema | Name | Type | Owner | Persistence | Size | Description
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
(0 rows)
|
|
||||||
|
|
||||||
\d color
|
|
||||||
Table "generated_identities.color"
|
|
||||||
Column | Type | Collation | Nullable | Default
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
color_id | bigint | | not null | generated always as identity
|
|
||||||
color_name | character varying | | not null |
|
|
||||||
Indexes:
|
|
||||||
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)
|
|
||||||
|
|
||||||
\c - - - :worker_1_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
\ds+ myseq
|
|
||||||
List of relations
|
|
||||||
Schema | Name | Type | Owner | Persistence | Size | Description
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
generated_identities | myseq | sequence | postgres | permanent | 8192 bytes |
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
\ds+ color_color_id_seq
|
|
||||||
List of relations
|
|
||||||
Schema | Name | Type | Owner | Persistence | Size | Description
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
(0 rows)
|
|
||||||
|
|
||||||
\d color
|
|
||||||
Table "generated_identities.color"
|
|
||||||
Column | Type | Collation | Nullable | Default
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
color_id | bigint | | not null | nextval('myseq'::regclass)
|
|
||||||
color_name | character varying | | not null |
|
|
||||||
Indexes:
|
|
||||||
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)
|
|
||||||
|
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
|
||||||
\c - - - :master_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
ALTER SEQUENCE myseq RENAME TO color_color_id_seq;
|
|
||||||
\ds+ myseq
|
|
||||||
List of relations
|
|
||||||
Schema | Name | Type | Owner | Persistence | Size | Description
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
(0 rows)
|
|
||||||
|
|
||||||
\ds+ color_color_id_seq
|
|
||||||
List of relations
|
|
||||||
Schema | Name | Type | Owner | Persistence | Size | Description
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
generated_identities | color_color_id_seq | sequence | postgres | permanent | 8192 bytes |
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
|
||||||
\c - - - :worker_1_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
\ds+ myseq
|
|
||||||
List of relations
|
|
||||||
Schema | Name | Type | Owner | Persistence | Size | Description
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
(0 rows)
|
|
||||||
|
|
||||||
\ds+ color_color_id_seq
|
|
||||||
List of relations
|
|
||||||
Schema | Name | Type | Owner | Persistence | Size | Description
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
generated_identities | color_color_id_seq | sequence | postgres | permanent | 8192 bytes |
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
\d color
|
|
||||||
Table "generated_identities.color"
|
|
||||||
Column | Type | Collation | Nullable | Default
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
color_id | bigint | | not null | nextval('color_color_id_seq'::regclass)
|
|
||||||
color_name | character varying | | not null |
|
|
||||||
Indexes:
|
|
||||||
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)
|
|
||||||
|
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
|
||||||
\c - - - :master_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
SELECT alter_distributed_table('co23423lor', shard_count := 6);
|
|
||||||
ERROR: relation "co23423lor" does not exist
|
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
|
||||||
\c - - - :worker_1_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
\ds+ color_color_id_seq
|
|
||||||
List of relations
|
|
||||||
Schema | Name | Type | Owner | Persistence | Size | Description
|
|
||||||
---------------------------------------------------------------------
|
|
||||||
generated_identities | color_color_id_seq | sequence | postgres | permanent | 8192 bytes |
|
|
||||||
(1 row)
|
|
||||||
|
|
||||||
INSERT INTO color(color_name) VALUES ('Red');
|
|
||||||
\c - - - :master_port
|
|
||||||
SET search_path TO generated_identities;
|
|
||||||
SET client_min_messages to ERROR;
|
|
||||||
DROP SCHEMA generated_identities CASCADE;
|
DROP SCHEMA generated_identities CASCADE;
|
||||||
|
DROP USER identity_test_user;
|
||||||
|
|
|
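For orientation, the diff above replaces the old catch-all generated_identities_test table with per-feature tables (partitioned_table, reference_table, color). The core behavior it pins down can be sketched on any Citus cluster roughly as follows; the table and column names here are illustrative, not part of the test suite:

-- a bigint identity column doubling as the distribution column
CREATE TABLE items (
    item_id bigint GENERATED ALWAYS AS IDENTITY,
    payload text
);
SELECT create_distributed_table('items', 'item_id');
-- identity values come from a cluster-wide range, so any node can insert
INSERT INTO items (payload) VALUES ('a');
-- an explicit value into a GENERATED ALWAYS column needs the override clause
INSERT INTO items (item_id, payload) OVERRIDING SYSTEM VALUE VALUES (42, 'b');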
@ -0,0 +1,431 @@
-- This test file has an alternative output because of error messages vary for PG13
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13;
 server_version_le_13
---------------------------------------------------------------------
 t
(1 row)

CREATE SCHEMA generated_identities;
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
SET citus.shard_replication_factor TO 1;
SELECT 1 from citus_add_node('localhost', :master_port, groupId=>0);
 ?column?
---------------------------------------------------------------------
 1
(1 row)

-- smallint identity column can not be distributed
CREATE TABLE smallint_identity_column (
    a smallint GENERATED BY DEFAULT AS IDENTITY
);
SELECT create_distributed_table('smallint_identity_column', 'a');
ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column
HINT: Use bigint identity column instead.
SELECT create_distributed_table_concurrently('smallint_identity_column', 'a');
ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column
HINT: Use bigint identity column instead.
SELECT create_reference_table('smallint_identity_column');
ERROR: cannot complete operation on a table with identity column
SELECT citus_add_local_table_to_metadata('smallint_identity_column');
 citus_add_local_table_to_metadata
---------------------------------------------------------------------

(1 row)

DROP TABLE smallint_identity_column;
-- int identity column can not be distributed
CREATE TABLE int_identity_column (
    a int GENERATED BY DEFAULT AS IDENTITY
);
SELECT create_distributed_table('int_identity_column', 'a');
ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column
HINT: Use bigint identity column instead.
SELECT create_distributed_table_concurrently('int_identity_column', 'a');
ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column
HINT: Use bigint identity column instead.
SELECT create_reference_table('int_identity_column');
ERROR: cannot complete operation on a table with identity column
SELECT citus_add_local_table_to_metadata('int_identity_column');
 citus_add_local_table_to_metadata
---------------------------------------------------------------------

(1 row)

DROP TABLE int_identity_column;
RESET citus.shard_replication_factor;
CREATE TABLE bigint_identity_column (
    a bigint GENERATED BY DEFAULT AS IDENTITY,
    b int
);
SELECT citus_add_local_table_to_metadata('bigint_identity_column');
 citus_add_local_table_to_metadata
---------------------------------------------------------------------

(1 row)

DROP TABLE bigint_identity_column;
CREATE TABLE bigint_identity_column (
    a bigint GENERATED BY DEFAULT AS IDENTITY,
    b int
);
SELECT create_distributed_table('bigint_identity_column', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

\d bigint_identity_column
Table "generated_identities.bigint_identity_column"
 Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
 a | bigint | | not null | generated by default as identity
 b | integer | | |

\c - - - :worker_1_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO bigint_identity_column (b)
SELECT s FROM generate_series(1,10) s;
\d generated_identities.bigint_identity_column
Table "generated_identities.bigint_identity_column"
 Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
 a | bigint | | not null | generated by default as identity
 b | integer | | |

\c - - - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO bigint_identity_column (b)
SELECT s FROM generate_series(11,20) s;
SELECT * FROM bigint_identity_column ORDER BY B ASC;
 a | b
---------------------------------------------------------------------
 3940649673949185 | 1
 3940649673949186 | 2
 3940649673949187 | 3
 3940649673949188 | 4
 3940649673949189 | 5
 3940649673949190 | 6
 3940649673949191 | 7
 3940649673949192 | 8
 3940649673949193 | 9
 3940649673949194 | 10
 1 | 11
 2 | 12
 3 | 13
 4 | 14
 5 | 15
 6 | 16
 7 | 17
 8 | 18
 9 | 19
 10 | 20
(20 rows)

-- table with identity column cannot be altered.
SELECT alter_distributed_table('bigint_identity_column', 'b');
ERROR: cannot complete operation on a table with identity column
-- table with identity column cannot be undistributed.
SELECT undistribute_table('bigint_identity_column');
ERROR: cannot complete operation on a table with identity column
DROP TABLE bigint_identity_column;
-- create a partitioned table for testing.
CREATE TABLE partitioned_table (
    a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
    b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10),
    c int
)
PARTITION BY RANGE (c);
CREATE TABLE partitioned_table_1_50 PARTITION OF partitioned_table FOR VALUES FROM (1) TO (50);
CREATE TABLE partitioned_table_50_500 PARTITION OF partitioned_table FOR VALUES FROM (50) TO (1000);
SELECT create_distributed_table('partitioned_table', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

\d partitioned_table
Partitioned table "generated_identities.partitioned_table"
 Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
 a | bigint | | not null | generated by default as identity
 b | bigint | | not null | generated always as identity
 c | integer | | |
Partition key: RANGE (c)
Number of partitions: 2 (Use \d+ to list them.)

\c - - - :worker_1_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
\d generated_identities.partitioned_table
Partitioned table "generated_identities.partitioned_table"
 Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
 a | bigint | | not null | generated by default as identity
 b | bigint | | not null | generated always as identity
 c | integer | | |
Partition key: RANGE (c)
Number of partitions: 2 (Use \d+ to list them.)

insert into partitioned_table (c) values (1);
insert into partitioned_table (c) SELECT 2;
INSERT INTO partitioned_table (c)
SELECT s FROM generate_series(3,7) s;
\c - - - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO partitioned_table (c)
SELECT s FROM generate_series(10,20) s;
INSERT INTO partitioned_table (a,c) VALUES (998,998);
INSERT INTO partitioned_table (a,b,c) OVERRIDING SYSTEM VALUE VALUES (999,999,999);
SELECT * FROM partitioned_table ORDER BY c ASC;
 a | b | c
---------------------------------------------------------------------
 3940649673949185 | 3940649673949185 | 1
 3940649673949195 | 3940649673949195 | 2
 3940649673949205 | 3940649673949205 | 3
 3940649673949215 | 3940649673949215 | 4
 3940649673949225 | 3940649673949225 | 5
 3940649673949235 | 3940649673949235 | 6
 3940649673949245 | 3940649673949245 | 7
 10 | 10 | 10
 20 | 20 | 11
 30 | 30 | 12
 40 | 40 | 13
 50 | 50 | 14
 60 | 60 | 15
 70 | 70 | 16
 80 | 80 | 17
 90 | 90 | 18
 100 | 100 | 19
 110 | 110 | 20
 998 | 120 | 998
 999 | 999 | 999
(20 rows)

-- alter table .. alter column .. add is unsupported
ALTER TABLE partitioned_table ALTER COLUMN g ADD GENERATED ALWAYS AS IDENTITY;
ERROR: alter table command is currently unsupported
DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
-- alter table .. alter column is unsupported
ALTER TABLE partitioned_table ALTER COLUMN b TYPE int;
ERROR: cannot execute ALTER COLUMN command involving identity column
DROP TABLE partitioned_table;
-- create a table for reference table testing.
CREATE TABLE reference_table (
    a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
    b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10) UNIQUE,
    c int
);
SELECT create_reference_table('reference_table');
 create_reference_table
---------------------------------------------------------------------

(1 row)

\d reference_table
Table "generated_identities.reference_table"
 Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
 a | bigint | | not null | generated by default as identity
 b | bigint | | not null | generated always as identity
 c | integer | | |
Indexes:
    "reference_table_b_key" UNIQUE CONSTRAINT, btree (b)

\c - - - :worker_1_port
SET search_path TO generated_identities;
\d generated_identities.reference_table
Table "generated_identities.reference_table"
 Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
 a | bigint | | not null | generated by default as identity
 b | bigint | | not null | generated always as identity
 c | integer | | |
Indexes:
    "reference_table_b_key" UNIQUE CONSTRAINT, btree (b)

INSERT INTO reference_table (c)
SELECT s FROM generate_series(1,10) s;
--on master
select * from reference_table;
 a | b | c
---------------------------------------------------------------------
 3940649673949185 | 3940649673949185 | 1
 3940649673949195 | 3940649673949195 | 2
 3940649673949205 | 3940649673949205 | 3
 3940649673949215 | 3940649673949215 | 4
 3940649673949225 | 3940649673949225 | 5
 3940649673949235 | 3940649673949235 | 6
 3940649673949245 | 3940649673949245 | 7
 3940649673949255 | 3940649673949255 | 8
 3940649673949265 | 3940649673949265 | 9
 3940649673949275 | 3940649673949275 | 10
(10 rows)

\c - - - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO reference_table (c)
SELECT s FROM generate_series(11,20) s;
SELECT * FROM reference_table ORDER BY c ASC;
 a | b | c
---------------------------------------------------------------------
 3940649673949185 | 3940649673949185 | 1
 3940649673949195 | 3940649673949195 | 2
 3940649673949205 | 3940649673949205 | 3
 3940649673949215 | 3940649673949215 | 4
 3940649673949225 | 3940649673949225 | 5
 3940649673949235 | 3940649673949235 | 6
 3940649673949245 | 3940649673949245 | 7
 3940649673949255 | 3940649673949255 | 8
 3940649673949265 | 3940649673949265 | 9
 3940649673949275 | 3940649673949275 | 10
 10 | 10 | 11
 20 | 20 | 12
 30 | 30 | 13
 40 | 40 | 14
 50 | 50 | 15
 60 | 60 | 16
 70 | 70 | 17
 80 | 80 | 18
 90 | 90 | 19
 100 | 100 | 20
(20 rows)

DROP TABLE reference_table;
CREATE TABLE color (
    color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
    color_name VARCHAR NOT NULL
);
-- https://github.com/citusdata/citus/issues/6694
CREATE USER identity_test_user;
GRANT INSERT ON color TO identity_test_user;
GRANT USAGE ON SCHEMA generated_identities TO identity_test_user;
SET ROLE identity_test_user;
SELECT create_distributed_table('color', 'color_id');
ERROR: must be owner of table color
SET ROLE postgres;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table_concurrently('color', 'color_id');
 create_distributed_table_concurrently
---------------------------------------------------------------------

(1 row)

RESET citus.shard_replication_factor;
\c - identity_test_user - :worker_1_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO color(color_name) VALUES ('Blue');
\c - postgres - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
SET citus.next_shard_id TO 12400000;
DROP TABLE Color;
CREATE TABLE color (
    color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
    color_name VARCHAR NOT NULL
) USING columnar;
SELECT create_distributed_table('color', 'color_id');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO color(color_name) VALUES ('Blue');
\d+ color
Table "generated_identities.color"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
 color_id | bigint | | not null | generated always as identity | plain | |
 color_name | character varying | | not null | | extended | |
Indexes:
    "color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)

\c - - - :worker_1_port
SET search_path TO generated_identities;
\d+ color
Table "generated_identities.color"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
 color_id | bigint | | not null | generated always as identity | plain | |
 color_name | character varying | | not null | | extended | |
Indexes:
    "color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)

INSERT INTO color(color_name) VALUES ('Red');
-- alter sequence .. restart
ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
ERROR: Altering a distributed sequence is currently not supported.
-- override system value
INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000"
DETAIL: Key (color_id)=(1) already exists.
CONTEXT: while executing command on localhost:xxxxx
-- update null or custom value
UPDATE color SET color_id = NULL;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
UPDATE color SET color_id = 1;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
\c - postgres - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
-- alter table .. add column .. GENERATED .. AS IDENTITY
ALTER TABLE color ADD COLUMN color_id BIGINT GENERATED ALWAYS AS IDENTITY;
ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers
-- alter sequence .. restart
ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
ERROR: Altering a distributed sequence is currently not supported.
-- override system value
INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000"
DETAIL: Key (color_id)=(1) already exists.
CONTEXT: while executing command on localhost:xxxxx
-- update null or custom value
UPDATE color SET color_id = NULL;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
UPDATE color SET color_id = 1;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
DROP TABLE IF EXISTS test;
CREATE TABLE test (x int, y int, z bigint generated by default as identity);
SELECT create_distributed_table('test', 'x', colocate_with := 'none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO test VALUES (1,2);
INSERT INTO test SELECT x, y FROM test WHERE x = 1;
SELECT * FROM test;
 x | y | z
---------------------------------------------------------------------
 1 | 2 | 1
 1 | 2 | 2
(2 rows)

DROP SCHEMA generated_identities CASCADE;
DROP USER identity_test_user;
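As the expected output above shows, Citus rejects smallint and int identity columns on distributed and reference tables and hints at bigint. A minimal sketch of the suggested fix, following that HINT (table and column names here are hypothetical):

CREATE TABLE events (
    event_id bigint GENERATED BY DEFAULT AS IDENTITY,  -- bigint, per the HINT
    body text
);
SELECT create_distributed_table('events', 'event_id');  -- now accepted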
@ -1,42 +0,0 @@
Parsed test spec with 2 sessions


starting permutation: s1-register s2-lock s1-lock s2-wrong-cancel-1 s2-wrong-cancel-2 s2-cancel
step s1-register:
 INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());

step s2-lock:
 BEGIN;
 LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;

step s1-lock:
 BEGIN;
 LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
 END;
 <waiting ...>
step s2-wrong-cancel-1:
 SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;

run_pg_send_cancellation
---------------------------------------------------------------------

(1 row)

step s2-wrong-cancel-2:
 SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;

run_pg_send_cancellation
---------------------------------------------------------------------

(1 row)

step s2-cancel:
 SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
 END;

run_pg_send_cancellation
---------------------------------------------------------------------

(1 row)

step s1-lock: <... completed>
ERROR: canceling statement due to user request
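The deleted isolation spec exercised out-of-band query cancellation: only the matching (pid, cancel key) pair cancels the blocked s1-lock step, while a wrong pid or wrong key is a no-op. run_pg_send_cancellation appears to be a test-local helper rather than a public API; with stock PostgreSQL, a comparable cancellation can be sketched with pg_cancel_backend, which handles the cancel key internally (the WHERE predicate below is illustrative):

SELECT pg_cancel_backend(pid)
FROM pg_stat_activity
WHERE query LIKE 'LOCK TABLE cancel_table%'
  AND pid <> pg_backend_pid();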
@ -1290,8 +1290,82 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.p
 (schema,{test_schema_for_sequence_propagation},{})
(1 row)

+ -- Bug: https://github.com/citusdata/citus/issues/7378
+ -- Create a reference table
+ CREATE TABLE tbl_ref_mats(row_id integer primary key);
+ INSERT INTO tbl_ref_mats VALUES (1), (2);
+ SELECT create_reference_table('tbl_ref_mats');
+ NOTICE: Copying data from local table...
+ NOTICE: copying the data has completed
+ DETAIL: The local data in the table is no longer visible, but is still on disk.
+ HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.tbl_ref_mats$$)
+  create_reference_table
+ ---------------------------------------------------------------------
+
+ (1 row)
+
+ -- Create a distributed table
+ CREATE TABLE tbl_dist_mats(series_id integer);
+ INSERT INTO tbl_dist_mats VALUES (1), (1), (2), (2);
+ SELECT create_distributed_table('tbl_dist_mats', 'series_id');
+ NOTICE: Copying data from local table...
+ NOTICE: copying the data has completed
+ DETAIL: The local data in the table is no longer visible, but is still on disk.
+ HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.tbl_dist_mats$$)
+  create_distributed_table
+ ---------------------------------------------------------------------
+
+ (1 row)
+
+ -- Create a view that joins the distributed table with the reference table on the distribution key.
+ CREATE VIEW vw_citus_views as
+ SELECT d.series_id FROM tbl_dist_mats d JOIN tbl_ref_mats r ON d.series_id = r.row_id;
+ -- The view initially works fine
+ SELECT * FROM vw_citus_views ORDER BY 1;
+  series_id
+ ---------------------------------------------------------------------
+  1
+  1
+  2
+  2
+ (4 rows)
+
+ -- Now, alter the table
+ ALTER TABLE tbl_ref_mats ADD COLUMN category1 varchar(50);
+ SELECT * FROM vw_citus_views ORDER BY 1;
+  series_id
+ ---------------------------------------------------------------------
+  1
+  1
+  2
+  2
+ (4 rows)
+
+ ALTER TABLE tbl_ref_mats ADD COLUMN category2 varchar(50);
+ SELECT * FROM vw_citus_views ORDER BY 1;
+  series_id
+ ---------------------------------------------------------------------
+  1
+  1
+  2
+  2
+ (4 rows)
+
+ ALTER TABLE tbl_ref_mats DROP COLUMN category1;
+ SELECT * FROM vw_citus_views ORDER BY 1;
+  series_id
+ ---------------------------------------------------------------------
+  1
+  1
+  2
+  2
+ (4 rows)

DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to sequence test_schema_for_sequence_propagation.seq_10
drop cascades to default value for column x of table table_without_sequence
DROP TABLE table_without_sequence;
+ DROP TABLE tbl_ref_mats CASCADE;
+ NOTICE: drop cascades to view vw_citus_views
+ DROP TABLE tbl_dist_mats CASCADE;
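The regression pinned down above (issue #7378) is that a view joining a distributed table to a reference table must keep working after the reference table is altered. Reduced to its essentials, the repro shape is (object names here are hypothetical):

CREATE TABLE ref(id int primary key);
SELECT create_reference_table('ref');
CREATE TABLE dist(id int);
SELECT create_distributed_table('dist', 'id');
CREATE VIEW v AS SELECT d.id FROM dist d JOIN ref r ON d.id = r.id;
ALTER TABLE ref ADD COLUMN note text;  -- must not break the view
SELECT * FROM v;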
@ -1303,14 +1303,23 @@ SELECT * FROM multi_extension.print_extension_changes();
 | type cluster_clock
(38 rows)

- DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
SHOW citus.version;
 citus.version
---------------------------------------------------------------------
- 11.2devel
+ 11.2.2
(1 row)

+ -- Snapshot of state at 11.2-2
+ ALTER EXTENSION citus UPDATE TO '11.2-2';
+ SELECT * FROM multi_extension.print_extension_changes();
+  previous_object | current_object
+ ---------------------------------------------------------------------
+  | function worker_adjust_identity_column_seq_ranges(regclass) void
+ (1 row)
+
+ -- Test downgrade to 11.2-1 from 11.2-2
+ ALTER EXTENSION citus UPDATE TO '11.2-1';
-- ensure no unexpected objects were created outside pg_catalog
SELECT pgio.type, pgio.identity
FROM pg_depend AS pgd,
@ -1326,6 +1335,7 @@ ORDER BY 1, 2;
 view | public.citus_tables
(1 row)

+ DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- see incompatible version errors out
RESET citus.enable_version_checks;
RESET columnar.enable_version_checks;
@@ -238,8 +238,40 @@ ORDER BY
 LIMIT 1 OFFSET 1;
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
+-- Check that shards of a table with GENERATED columns can be moved.
+\c - - - :master_port
+SET citus.shard_count TO 4;
+SET citus.shard_replication_factor TO 1;
+CREATE TABLE mx_table_with_generated_column (a int, b int GENERATED ALWAYS AS ( a + 3 ) STORED, c int);
+SELECT create_distributed_table('mx_table_with_generated_column', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- Check that dropped columns are handled properly in a move.
+ALTER TABLE mx_table_with_generated_column DROP COLUMN c;
+-- Move a shard from worker 1 to worker 2
+SELECT
+citus_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical')
+FROM
+pg_dist_shard NATURAL JOIN pg_dist_shard_placement
+WHERE
+logicalrelid = 'mx_table_with_generated_column'::regclass
+AND nodeport = :worker_1_port
+ORDER BY
+shardid
+LIMIT 1;
+ citus_move_shard_placement
+---------------------------------------------------------------------
+
+(1 row)
+
 -- Cleanup
 \c - - - :master_port
+SET client_min_messages TO WARNING;
+CALL citus_cleanup_orphaned_resources();
+DROP TABLE mx_table_with_generated_column;
 DROP TABLE mx_table_1;
 DROP TABLE mx_table_2;
 DROP TABLE mx_table_3;
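
Background on the GENERATED column handling exercised above: PostgreSQL computes STORED generated columns on write and rejects explicit values for them, so a shard move has to copy only the non-generated columns. A minimal plain-PostgreSQL sketch of that constraint (the table name is illustrative, not part of the patch):

    CREATE TABLE gen_demo (a int, b int GENERATED ALWAYS AS (a + 3) STORED);
    INSERT INTO gen_demo (a) VALUES (1);        -- fine: b is computed as 4
    INSERT INTO gen_demo (a, b) VALUES (2, 5);  -- fails: b cannot be given an explicit value
    COPY gen_demo (a) TO STDOUT;                -- a data copy must target only non-generated columns
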
@@ -497,22 +497,22 @@ ORDER BY table_name::text;
 SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards ORDER BY shard_name::text;
 shard_name | table_name | citus_table_type | shard_size
 ---------------------------------------------------------------------
-app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220097 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 0
-app_analytics_events_mx_1220099 | app_analytics_events_mx | distributed | 0
+app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220096 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220097 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220098 | app_analytics_events_mx | distributed | 8192
+app_analytics_events_mx_1220099 | app_analytics_events_mx | distributed | 8192
 articles_hash_mx_1220104 | articles_hash_mx | distributed | 0
 articles_hash_mx_1220104 | articles_hash_mx | distributed | 0
 articles_hash_mx_1220104 | articles_hash_mx | distributed | 0
@@ -608,22 +608,22 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR
 citus_mx_test_schema.nation_hash_collation_search_path_1220046 | citus_mx_test_schema.nation_hash_collation_search_path | distributed | 0
 citus_mx_test_schema.nation_hash_collation_search_path_1220046 | citus_mx_test_schema.nation_hash_collation_search_path | distributed | 0
 citus_mx_test_schema.nation_hash_collation_search_path_1220047 | citus_mx_test_schema.nation_hash_collation_search_path | distributed | 8192
-citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
-citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
-citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
-citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
-citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
-citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
-citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
-citus_mx_test_schema.nation_hash_composite_types_1220049 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
-citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 0
-citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 0
-citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 0
-citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 0
-citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 0
-citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 0
-citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 0
-citus_mx_test_schema.nation_hash_composite_types_1220051 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
+citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 16384
+citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 16384
+citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 16384
+citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 16384
+citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 16384
+citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 16384
+citus_mx_test_schema.nation_hash_composite_types_1220048 | citus_mx_test_schema.nation_hash_composite_types | distributed | 16384
+citus_mx_test_schema.nation_hash_composite_types_1220049 | citus_mx_test_schema.nation_hash_composite_types | distributed | 16384
+citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
+citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
+citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
+citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
+citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
+citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
+citus_mx_test_schema.nation_hash_composite_types_1220050 | citus_mx_test_schema.nation_hash_composite_types | distributed | 8192
+citus_mx_test_schema.nation_hash_composite_types_1220051 | citus_mx_test_schema.nation_hash_composite_types | distributed | 16384
 citus_mx_test_schema_join_1.nation_hash_1220032 | citus_mx_test_schema_join_1.nation_hash | distributed | 0
 citus_mx_test_schema_join_1.nation_hash_1220032 | citus_mx_test_schema_join_1.nation_hash | distributed | 0
 citus_mx_test_schema_join_1.nation_hash_1220032 | citus_mx_test_schema_join_1.nation_hash | distributed | 0
@@ -696,109 +696,109 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR
 customer_mx_1220084 | customer_mx | reference | 0
 customer_mx_1220084 | customer_mx | reference | 0
 customer_mx_1220084 | customer_mx | reference | 0
-labs_mx_1220102 | labs_mx | distributed | 0
-labs_mx_1220102 | labs_mx | distributed | 0
-labs_mx_1220102 | labs_mx | distributed | 0
-labs_mx_1220102 | labs_mx | distributed | 0
-labs_mx_1220102 | labs_mx | distributed | 0
-labs_mx_1220102 | labs_mx | distributed | 0
-labs_mx_1220102 | labs_mx | distributed | 0
-limit_orders_mx_1220092 | limit_orders_mx | distributed | 0
-limit_orders_mx_1220092 | limit_orders_mx | distributed | 0
-limit_orders_mx_1220092 | limit_orders_mx | distributed | 0
-limit_orders_mx_1220092 | limit_orders_mx | distributed | 0
-limit_orders_mx_1220092 | limit_orders_mx | distributed | 0
-limit_orders_mx_1220092 | limit_orders_mx | distributed | 0
-limit_orders_mx_1220092 | limit_orders_mx | distributed | 0
-limit_orders_mx_1220093 | limit_orders_mx | distributed | 0
-lineitem_mx_1220052 | lineitem_mx | distributed | 0
-lineitem_mx_1220052 | lineitem_mx | distributed | 0
-lineitem_mx_1220052 | lineitem_mx | distributed | 0
-lineitem_mx_1220052 | lineitem_mx | distributed | 0
-lineitem_mx_1220052 | lineitem_mx | distributed | 0
-lineitem_mx_1220052 | lineitem_mx | distributed | 0
-lineitem_mx_1220052 | lineitem_mx | distributed | 0
-lineitem_mx_1220053 | lineitem_mx | distributed | 0
-lineitem_mx_1220054 | lineitem_mx | distributed | 0
-lineitem_mx_1220054 | lineitem_mx | distributed | 0
-lineitem_mx_1220054 | lineitem_mx | distributed | 0
-lineitem_mx_1220054 | lineitem_mx | distributed | 0
-lineitem_mx_1220054 | lineitem_mx | distributed | 0
-lineitem_mx_1220054 | lineitem_mx | distributed | 0
-lineitem_mx_1220054 | lineitem_mx | distributed | 0
-lineitem_mx_1220055 | lineitem_mx | distributed | 0
-lineitem_mx_1220056 | lineitem_mx | distributed | 0
-lineitem_mx_1220056 | lineitem_mx | distributed | 0
-lineitem_mx_1220056 | lineitem_mx | distributed | 0
-lineitem_mx_1220056 | lineitem_mx | distributed | 0
-lineitem_mx_1220056 | lineitem_mx | distributed | 0
-lineitem_mx_1220056 | lineitem_mx | distributed | 0
-lineitem_mx_1220056 | lineitem_mx | distributed | 0
-lineitem_mx_1220057 | lineitem_mx | distributed | 0
-lineitem_mx_1220058 | lineitem_mx | distributed | 0
-lineitem_mx_1220058 | lineitem_mx | distributed | 0
-lineitem_mx_1220058 | lineitem_mx | distributed | 0
-lineitem_mx_1220058 | lineitem_mx | distributed | 0
-lineitem_mx_1220058 | lineitem_mx | distributed | 0
-lineitem_mx_1220058 | lineitem_mx | distributed | 0
-lineitem_mx_1220058 | lineitem_mx | distributed | 0
-lineitem_mx_1220059 | lineitem_mx | distributed | 0
-lineitem_mx_1220060 | lineitem_mx | distributed | 0
-lineitem_mx_1220060 | lineitem_mx | distributed | 0
-lineitem_mx_1220060 | lineitem_mx | distributed | 0
-lineitem_mx_1220060 | lineitem_mx | distributed | 0
-lineitem_mx_1220060 | lineitem_mx | distributed | 0
-lineitem_mx_1220060 | lineitem_mx | distributed | 0
-lineitem_mx_1220060 | lineitem_mx | distributed | 0
-lineitem_mx_1220061 | lineitem_mx | distributed | 0
-lineitem_mx_1220062 | lineitem_mx | distributed | 0
-lineitem_mx_1220062 | lineitem_mx | distributed | 0
-lineitem_mx_1220062 | lineitem_mx | distributed | 0
-lineitem_mx_1220062 | lineitem_mx | distributed | 0
-lineitem_mx_1220062 | lineitem_mx | distributed | 0
-lineitem_mx_1220062 | lineitem_mx | distributed | 0
-lineitem_mx_1220062 | lineitem_mx | distributed | 0
-lineitem_mx_1220063 | lineitem_mx | distributed | 0
-lineitem_mx_1220064 | lineitem_mx | distributed | 0
-lineitem_mx_1220064 | lineitem_mx | distributed | 0
-lineitem_mx_1220064 | lineitem_mx | distributed | 0
-lineitem_mx_1220064 | lineitem_mx | distributed | 0
-lineitem_mx_1220064 | lineitem_mx | distributed | 0
-lineitem_mx_1220064 | lineitem_mx | distributed | 0
-lineitem_mx_1220064 | lineitem_mx | distributed | 0
-lineitem_mx_1220065 | lineitem_mx | distributed | 0
-lineitem_mx_1220066 | lineitem_mx | distributed | 0
-lineitem_mx_1220066 | lineitem_mx | distributed | 0
-lineitem_mx_1220066 | lineitem_mx | distributed | 0
-lineitem_mx_1220066 | lineitem_mx | distributed | 0
-lineitem_mx_1220066 | lineitem_mx | distributed | 0
-lineitem_mx_1220066 | lineitem_mx | distributed | 0
-lineitem_mx_1220066 | lineitem_mx | distributed | 0
-lineitem_mx_1220067 | lineitem_mx | distributed | 0
-multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 0
-multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 0
-multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 0
-multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 0
-multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 0
-multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 0
-multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 0
-multiple_hash_mx_1220095 | multiple_hash_mx | distributed | 0
-mx_ddl_table_1220088 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220088 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220088 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220088 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220088 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220088 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220088 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220089 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220090 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220090 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220090 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220090 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220090 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220090 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220090 | mx_ddl_table | distributed | 8192
-mx_ddl_table_1220091 | mx_ddl_table | distributed | 8192
+labs_mx_1220102 | labs_mx | distributed | 8192
+labs_mx_1220102 | labs_mx | distributed | 8192
+labs_mx_1220102 | labs_mx | distributed | 8192
+labs_mx_1220102 | labs_mx | distributed | 8192
+labs_mx_1220102 | labs_mx | distributed | 8192
+labs_mx_1220102 | labs_mx | distributed | 8192
+labs_mx_1220102 | labs_mx | distributed | 8192
+limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
+limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
+limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
+limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
+limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
+limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
+limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
+limit_orders_mx_1220093 | limit_orders_mx | distributed | 16384
+lineitem_mx_1220052 | lineitem_mx | distributed | 16384
+lineitem_mx_1220052 | lineitem_mx | distributed | 16384
+lineitem_mx_1220052 | lineitem_mx | distributed | 16384
+lineitem_mx_1220052 | lineitem_mx | distributed | 16384
+lineitem_mx_1220052 | lineitem_mx | distributed | 16384
+lineitem_mx_1220052 | lineitem_mx | distributed | 16384
+lineitem_mx_1220052 | lineitem_mx | distributed | 16384
+lineitem_mx_1220053 | lineitem_mx | distributed | 16384
+lineitem_mx_1220054 | lineitem_mx | distributed | 16384
+lineitem_mx_1220054 | lineitem_mx | distributed | 16384
+lineitem_mx_1220054 | lineitem_mx | distributed | 16384
+lineitem_mx_1220054 | lineitem_mx | distributed | 16384
+lineitem_mx_1220054 | lineitem_mx | distributed | 16384
+lineitem_mx_1220054 | lineitem_mx | distributed | 16384
+lineitem_mx_1220054 | lineitem_mx | distributed | 16384
+lineitem_mx_1220055 | lineitem_mx | distributed | 16384
+lineitem_mx_1220056 | lineitem_mx | distributed | 16384
+lineitem_mx_1220056 | lineitem_mx | distributed | 16384
+lineitem_mx_1220056 | lineitem_mx | distributed | 16384
+lineitem_mx_1220056 | lineitem_mx | distributed | 16384
+lineitem_mx_1220056 | lineitem_mx | distributed | 16384
+lineitem_mx_1220056 | lineitem_mx | distributed | 16384
+lineitem_mx_1220056 | lineitem_mx | distributed | 16384
+lineitem_mx_1220057 | lineitem_mx | distributed | 16384
+lineitem_mx_1220058 | lineitem_mx | distributed | 16384
+lineitem_mx_1220058 | lineitem_mx | distributed | 16384
+lineitem_mx_1220058 | lineitem_mx | distributed | 16384
+lineitem_mx_1220058 | lineitem_mx | distributed | 16384
+lineitem_mx_1220058 | lineitem_mx | distributed | 16384
+lineitem_mx_1220058 | lineitem_mx | distributed | 16384
+lineitem_mx_1220058 | lineitem_mx | distributed | 16384
+lineitem_mx_1220059 | lineitem_mx | distributed | 16384
+lineitem_mx_1220060 | lineitem_mx | distributed | 16384
+lineitem_mx_1220060 | lineitem_mx | distributed | 16384
+lineitem_mx_1220060 | lineitem_mx | distributed | 16384
+lineitem_mx_1220060 | lineitem_mx | distributed | 16384
+lineitem_mx_1220060 | lineitem_mx | distributed | 16384
+lineitem_mx_1220060 | lineitem_mx | distributed | 16384
+lineitem_mx_1220060 | lineitem_mx | distributed | 16384
+lineitem_mx_1220061 | lineitem_mx | distributed | 16384
+lineitem_mx_1220062 | lineitem_mx | distributed | 16384
+lineitem_mx_1220062 | lineitem_mx | distributed | 16384
+lineitem_mx_1220062 | lineitem_mx | distributed | 16384
+lineitem_mx_1220062 | lineitem_mx | distributed | 16384
+lineitem_mx_1220062 | lineitem_mx | distributed | 16384
+lineitem_mx_1220062 | lineitem_mx | distributed | 16384
+lineitem_mx_1220062 | lineitem_mx | distributed | 16384
+lineitem_mx_1220063 | lineitem_mx | distributed | 16384
+lineitem_mx_1220064 | lineitem_mx | distributed | 16384
+lineitem_mx_1220064 | lineitem_mx | distributed | 16384
+lineitem_mx_1220064 | lineitem_mx | distributed | 16384
+lineitem_mx_1220064 | lineitem_mx | distributed | 16384
+lineitem_mx_1220064 | lineitem_mx | distributed | 16384
+lineitem_mx_1220064 | lineitem_mx | distributed | 16384
+lineitem_mx_1220064 | lineitem_mx | distributed | 16384
+lineitem_mx_1220065 | lineitem_mx | distributed | 16384
+lineitem_mx_1220066 | lineitem_mx | distributed | 16384
+lineitem_mx_1220066 | lineitem_mx | distributed | 16384
+lineitem_mx_1220066 | lineitem_mx | distributed | 16384
+lineitem_mx_1220066 | lineitem_mx | distributed | 16384
+lineitem_mx_1220066 | lineitem_mx | distributed | 16384
+lineitem_mx_1220066 | lineitem_mx | distributed | 16384
+lineitem_mx_1220066 | lineitem_mx | distributed | 16384
+lineitem_mx_1220067 | lineitem_mx | distributed | 16384
+multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 8192
+multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 8192
+multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 8192
+multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 8192
+multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 8192
+multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 8192
+multiple_hash_mx_1220094 | multiple_hash_mx | distributed | 8192
+multiple_hash_mx_1220095 | multiple_hash_mx | distributed | 8192
+mx_ddl_table_1220088 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220088 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220088 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220088 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220088 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220088 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220088 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220089 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220090 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220090 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220090 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220090 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220090 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220090 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220090 | mx_ddl_table | distributed | 24576
+mx_ddl_table_1220091 | mx_ddl_table | distributed | 24576
 nation_hash_1220000 | nation_hash | distributed | 0
 nation_hash_1220000 | nation_hash | distributed | 0
 nation_hash_1220000 | nation_hash | distributed | 0
@@ -871,77 +871,77 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR
 nation_mx_1220085 | nation_mx | reference | 0
 nation_mx_1220085 | nation_mx | reference | 0
 nation_mx_1220085 | nation_mx | reference | 0
-objects_mx_1220103 | objects_mx | distributed | 0
-objects_mx_1220103 | objects_mx | distributed | 0
-objects_mx_1220103 | objects_mx | distributed | 0
-objects_mx_1220103 | objects_mx | distributed | 0
-objects_mx_1220103 | objects_mx | distributed | 0
-objects_mx_1220103 | objects_mx | distributed | 0
-objects_mx_1220103 | objects_mx | distributed | 0
-orders_mx_1220068 | orders_mx | distributed | 0
-orders_mx_1220068 | orders_mx | distributed | 0
-orders_mx_1220068 | orders_mx | distributed | 0
-orders_mx_1220068 | orders_mx | distributed | 0
-orders_mx_1220068 | orders_mx | distributed | 0
-orders_mx_1220068 | orders_mx | distributed | 0
-orders_mx_1220068 | orders_mx | distributed | 0
-orders_mx_1220069 | orders_mx | distributed | 0
-orders_mx_1220070 | orders_mx | distributed | 0
-orders_mx_1220070 | orders_mx | distributed | 0
-orders_mx_1220070 | orders_mx | distributed | 0
-orders_mx_1220070 | orders_mx | distributed | 0
-orders_mx_1220070 | orders_mx | distributed | 0
-orders_mx_1220070 | orders_mx | distributed | 0
-orders_mx_1220070 | orders_mx | distributed | 0
-orders_mx_1220071 | orders_mx | distributed | 0
-orders_mx_1220072 | orders_mx | distributed | 0
-orders_mx_1220072 | orders_mx | distributed | 0
-orders_mx_1220072 | orders_mx | distributed | 0
-orders_mx_1220072 | orders_mx | distributed | 0
-orders_mx_1220072 | orders_mx | distributed | 0
-orders_mx_1220072 | orders_mx | distributed | 0
-orders_mx_1220072 | orders_mx | distributed | 0
-orders_mx_1220073 | orders_mx | distributed | 0
-orders_mx_1220074 | orders_mx | distributed | 0
-orders_mx_1220074 | orders_mx | distributed | 0
-orders_mx_1220074 | orders_mx | distributed | 0
-orders_mx_1220074 | orders_mx | distributed | 0
-orders_mx_1220074 | orders_mx | distributed | 0
-orders_mx_1220074 | orders_mx | distributed | 0
-orders_mx_1220074 | orders_mx | distributed | 0
-orders_mx_1220075 | orders_mx | distributed | 0
-orders_mx_1220076 | orders_mx | distributed | 0
-orders_mx_1220076 | orders_mx | distributed | 0
-orders_mx_1220076 | orders_mx | distributed | 0
-orders_mx_1220076 | orders_mx | distributed | 0
-orders_mx_1220076 | orders_mx | distributed | 0
-orders_mx_1220076 | orders_mx | distributed | 0
-orders_mx_1220076 | orders_mx | distributed | 0
-orders_mx_1220077 | orders_mx | distributed | 0
-orders_mx_1220078 | orders_mx | distributed | 0
-orders_mx_1220078 | orders_mx | distributed | 0
-orders_mx_1220078 | orders_mx | distributed | 0
-orders_mx_1220078 | orders_mx | distributed | 0
-orders_mx_1220078 | orders_mx | distributed | 0
-orders_mx_1220078 | orders_mx | distributed | 0
-orders_mx_1220078 | orders_mx | distributed | 0
-orders_mx_1220079 | orders_mx | distributed | 0
-orders_mx_1220080 | orders_mx | distributed | 0
-orders_mx_1220080 | orders_mx | distributed | 0
-orders_mx_1220080 | orders_mx | distributed | 0
-orders_mx_1220080 | orders_mx | distributed | 0
-orders_mx_1220080 | orders_mx | distributed | 0
-orders_mx_1220080 | orders_mx | distributed | 0
-orders_mx_1220080 | orders_mx | distributed | 0
-orders_mx_1220081 | orders_mx | distributed | 0
-orders_mx_1220082 | orders_mx | distributed | 0
-orders_mx_1220082 | orders_mx | distributed | 0
-orders_mx_1220082 | orders_mx | distributed | 0
-orders_mx_1220082 | orders_mx | distributed | 0
-orders_mx_1220082 | orders_mx | distributed | 0
-orders_mx_1220082 | orders_mx | distributed | 0
-orders_mx_1220082 | orders_mx | distributed | 0
-orders_mx_1220083 | orders_mx | distributed | 0
+objects_mx_1220103 | objects_mx | distributed | 16384
+objects_mx_1220103 | objects_mx | distributed | 16384
+objects_mx_1220103 | objects_mx | distributed | 16384
+objects_mx_1220103 | objects_mx | distributed | 16384
+objects_mx_1220103 | objects_mx | distributed | 16384
+objects_mx_1220103 | objects_mx | distributed | 16384
+objects_mx_1220103 | objects_mx | distributed | 16384
+orders_mx_1220068 | orders_mx | distributed | 8192
+orders_mx_1220068 | orders_mx | distributed | 8192
+orders_mx_1220068 | orders_mx | distributed | 8192
+orders_mx_1220068 | orders_mx | distributed | 8192
+orders_mx_1220068 | orders_mx | distributed | 8192
+orders_mx_1220068 | orders_mx | distributed | 8192
+orders_mx_1220068 | orders_mx | distributed | 8192
+orders_mx_1220069 | orders_mx | distributed | 8192
+orders_mx_1220070 | orders_mx | distributed | 8192
+orders_mx_1220070 | orders_mx | distributed | 8192
+orders_mx_1220070 | orders_mx | distributed | 8192
+orders_mx_1220070 | orders_mx | distributed | 8192
+orders_mx_1220070 | orders_mx | distributed | 8192
+orders_mx_1220070 | orders_mx | distributed | 8192
+orders_mx_1220070 | orders_mx | distributed | 8192
+orders_mx_1220071 | orders_mx | distributed | 8192
+orders_mx_1220072 | orders_mx | distributed | 8192
+orders_mx_1220072 | orders_mx | distributed | 8192
+orders_mx_1220072 | orders_mx | distributed | 8192
+orders_mx_1220072 | orders_mx | distributed | 8192
+orders_mx_1220072 | orders_mx | distributed | 8192
+orders_mx_1220072 | orders_mx | distributed | 8192
+orders_mx_1220072 | orders_mx | distributed | 8192
+orders_mx_1220073 | orders_mx | distributed | 8192
+orders_mx_1220074 | orders_mx | distributed | 8192
+orders_mx_1220074 | orders_mx | distributed | 8192
+orders_mx_1220074 | orders_mx | distributed | 8192
+orders_mx_1220074 | orders_mx | distributed | 8192
+orders_mx_1220074 | orders_mx | distributed | 8192
+orders_mx_1220074 | orders_mx | distributed | 8192
+orders_mx_1220074 | orders_mx | distributed | 8192
+orders_mx_1220075 | orders_mx | distributed | 8192
+orders_mx_1220076 | orders_mx | distributed | 8192
+orders_mx_1220076 | orders_mx | distributed | 8192
+orders_mx_1220076 | orders_mx | distributed | 8192
+orders_mx_1220076 | orders_mx | distributed | 8192
+orders_mx_1220076 | orders_mx | distributed | 8192
+orders_mx_1220076 | orders_mx | distributed | 8192
+orders_mx_1220076 | orders_mx | distributed | 8192
+orders_mx_1220077 | orders_mx | distributed | 8192
+orders_mx_1220078 | orders_mx | distributed | 8192
+orders_mx_1220078 | orders_mx | distributed | 8192
+orders_mx_1220078 | orders_mx | distributed | 8192
+orders_mx_1220078 | orders_mx | distributed | 8192
+orders_mx_1220078 | orders_mx | distributed | 8192
+orders_mx_1220078 | orders_mx | distributed | 8192
+orders_mx_1220078 | orders_mx | distributed | 8192
+orders_mx_1220079 | orders_mx | distributed | 8192
+orders_mx_1220080 | orders_mx | distributed | 8192
+orders_mx_1220080 | orders_mx | distributed | 8192
+orders_mx_1220080 | orders_mx | distributed | 8192
+orders_mx_1220080 | orders_mx | distributed | 8192
+orders_mx_1220080 | orders_mx | distributed | 8192
+orders_mx_1220080 | orders_mx | distributed | 8192
+orders_mx_1220080 | orders_mx | distributed | 8192
+orders_mx_1220081 | orders_mx | distributed | 8192
+orders_mx_1220082 | orders_mx | distributed | 8192
+orders_mx_1220082 | orders_mx | distributed | 8192
+orders_mx_1220082 | orders_mx | distributed | 8192
+orders_mx_1220082 | orders_mx | distributed | 8192
+orders_mx_1220082 | orders_mx | distributed | 8192
+orders_mx_1220082 | orders_mx | distributed | 8192
+orders_mx_1220082 | orders_mx | distributed | 8192
+orders_mx_1220083 | orders_mx | distributed | 8192
 part_mx_1220086 | part_mx | reference | 0
 part_mx_1220086 | part_mx | reference | 0
 part_mx_1220086 | part_mx | reference | 0
@@ -950,14 +950,14 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR
 part_mx_1220086 | part_mx | reference | 0
 part_mx_1220086 | part_mx | reference | 0
 part_mx_1220086 | part_mx | reference | 0
-researchers_mx_1220100 | researchers_mx | distributed | 0
-researchers_mx_1220100 | researchers_mx | distributed | 0
-researchers_mx_1220100 | researchers_mx | distributed | 0
-researchers_mx_1220100 | researchers_mx | distributed | 0
-researchers_mx_1220100 | researchers_mx | distributed | 0
-researchers_mx_1220100 | researchers_mx | distributed | 0
-researchers_mx_1220100 | researchers_mx | distributed | 0
-researchers_mx_1220101 | researchers_mx | distributed | 0
+researchers_mx_1220100 | researchers_mx | distributed | 8192
+researchers_mx_1220100 | researchers_mx | distributed | 8192
+researchers_mx_1220100 | researchers_mx | distributed | 8192
+researchers_mx_1220100 | researchers_mx | distributed | 8192
+researchers_mx_1220100 | researchers_mx | distributed | 8192
+researchers_mx_1220100 | researchers_mx | distributed | 8192
+researchers_mx_1220100 | researchers_mx | distributed | 8192
+researchers_mx_1220101 | researchers_mx | distributed | 8192
 supplier_mx_1220087 | supplier_mx | reference | 0
 supplier_mx_1220087 | supplier_mx | reference | 0
 supplier_mx_1220087 | supplier_mx | reference | 0

@@ -1095,6 +1095,9 @@ ALTER TABLE IF EXISTS non_existent_table SET SCHEMA non_existent_schema;
 NOTICE: relation "non_existent_table" does not exist, skipping
 DROP SCHEMA existing_schema, another_existing_schema CASCADE;
 NOTICE: drop cascades to table existing_schema.table_set_schema
+-- test DROP SCHEMA with nonexisting schemas
+DROP SCHEMA ax, bx, cx, dx, ex, fx, gx, jx;
+ERROR: schema "ax" does not exist
 -- test ALTER TABLE SET SCHEMA with interesting names
 CREATE SCHEMA "cItuS.T E E N'sSchema";
 CREATE SCHEMA "citus-teen's scnd schm.";
@@ -1361,6 +1364,7 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.p
 (schema,{run_test_schema},{})
 (1 row)
 
+DROP TABLE public.nation_local;
 DROP SCHEMA run_test_schema, test_schema_support_join_1, test_schema_support_join_2, "Citus'Teen123", "CiTUS.TEEN2", bar, test_schema_support CASCADE;
 -- verify that the dropped schema is removed from worker's pg_dist_object
 SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object

@@ -254,6 +254,76 @@ FETCH FORWARD 3 FROM holdCursor;
 1 | 19
 (3 rows)
 
+CLOSE holdCursor;
+-- Test DECLARE CURSOR .. WITH HOLD inside transaction block
+BEGIN;
+DECLARE holdCursor CURSOR WITH HOLD FOR
+SELECT * FROM cursor_me WHERE x = 1 ORDER BY y;
+FETCH 3 FROM holdCursor;
+ x | y
+---------------------------------------------------------------------
+ 1 | 10
+ 1 | 11
+ 1 | 12
+(3 rows)
+
+FETCH BACKWARD 3 FROM holdCursor;
+ x | y
+---------------------------------------------------------------------
+ 1 | 11
+ 1 | 10
+(2 rows)
+
+FETCH FORWARD 3 FROM holdCursor;
+ x | y
+---------------------------------------------------------------------
+ 1 | 10
+ 1 | 11
+ 1 | 12
+(3 rows)
+
+COMMIT;
+FETCH 3 FROM holdCursor;
+ x | y
+---------------------------------------------------------------------
+ 1 | 13
+ 1 | 14
+ 1 | 15
+(3 rows)
+
+CLOSE holdCursor;
+-- Test DECLARE NO SCROLL CURSOR .. WITH HOLD inside transaction block
+BEGIN;
+DECLARE holdCursor NO SCROLL CURSOR WITH HOLD FOR
+SELECT * FROM cursor_me WHERE x = 1 ORDER BY y;
+FETCH 3 FROM holdCursor;
+ x | y
+---------------------------------------------------------------------
+ 1 | 10
+ 1 | 11
+ 1 | 12
+(3 rows)
+
+FETCH FORWARD 3 FROM holdCursor;
+ x | y
+---------------------------------------------------------------------
+ 1 | 13
+ 1 | 14
+ 1 | 15
+(3 rows)
+
+COMMIT;
+FETCH 3 FROM holdCursor;
+ x | y
+---------------------------------------------------------------------
+ 1 | 16
+ 1 | 17
+ 1 | 18
+(3 rows)
+
+FETCH BACKWARD 3 FROM holdCursor;
+ERROR: cursor can only scan forward
+HINT: Declare it with SCROLL option to enable backward scan.
 CLOSE holdCursor;
 -- Test DECLARE CURSOR .. WITH HOLD with parameter
 CREATE OR REPLACE FUNCTION declares_cursor(p int)
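
The WITH HOLD behavior exercised above is stock PostgreSQL semantics: at COMMIT a holdable cursor's result set is materialized, so fetches keep working after the transaction ends, and NO SCROLL additionally forbids backward fetches. A self-contained sketch (cursor name and query are illustrative, not from the patch):

    BEGIN;
    DECLARE c CURSOR WITH HOLD FOR SELECT g FROM generate_series(1, 6) g;
    FETCH 3 FROM c;   -- returns 1, 2, 3
    COMMIT;           -- the held cursor is materialized and stays open
    FETCH 3 FROM c;   -- still valid outside the transaction: returns 4, 5, 6
    CLOSE c;
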
@@ -221,7 +221,7 @@ NOTICE: issuing SET LOCAL citus.shard_count TO '4';
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SET LOCAL citus.shard_replication_factor TO '2';
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
+NOTICE: issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -244,7 +244,7 @@ NOTICE: issuing SET LOCAL citus.shard_count TO '4';
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SET LOCAL citus.shard_replication_factor TO '2';
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
+NOTICE: issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -267,7 +267,7 @@ NOTICE: issuing SET LOCAL citus.shard_count TO '4';
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SET LOCAL citus.shard_replication_factor TO '2';
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
+NOTICE: issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -290,7 +290,7 @@ NOTICE: issuing SET LOCAL citus.shard_count TO '4';
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SET LOCAL citus.shard_replication_factor TO '2';
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
+NOTICE: issuing SELECT pg_catalog.citus_copy_shard_placement(43xxxx,xx,xx,'block_writes')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing COMMIT
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
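
The only change in the four hunks above is that the internally issued command is now schema-qualified as pg_catalog.citus_copy_shard_placement, which pins name resolution instead of leaving it to the session's search_path. A small plain-PostgreSQL illustration of why qualification matters (schema and function names here are hypothetical):

    CREATE SCHEMA s;
    CREATE FUNCTION s.f() RETURNS text LANGUAGE sql AS $$ SELECT 's.f' $$;
    CREATE FUNCTION public.f() RETURNS text LANGUAGE sql AS $$ SELECT 'public.f' $$;
    SET search_path TO s, public;
    SELECT f();          -- resolves to s.f() via search_path
    SELECT public.f();   -- schema-qualified, unaffected by search_path
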
@@ -230,6 +230,7 @@ ORDER BY 1;
 function truncate_local_data_after_distributing_table(regclass)
 function undistribute_table(regclass,boolean)
 function update_distributed_table_colocation(regclass,text)
+function worker_adjust_identity_column_seq_ranges(regclass)
 function worker_apply_inter_shard_ddl_command(bigint,text,bigint,text,text)
 function worker_apply_sequence_command(text)
 function worker_apply_sequence_command(text,regtype)
@@ -318,5 +319,5 @@ ORDER BY 1;
 view citus_stat_statements
 view pg_dist_shard_placement
 view time_partitions
-(310 rows)
+(311 rows)
 

@@ -142,8 +142,90 @@ SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_810700
 (1 row)
 
 -- END: List updated row count for local targets shard.
+-- Check that GENERATED columns are handled properly in a shard split operation.
+\c - - - :master_port
+SET search_path TO worker_split_copy_test;
+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 81080000;
+-- BEGIN: Create distributed table and insert data.
+CREATE TABLE worker_split_copy_test.dist_table_with_generated_col(id int primary key, new_id int GENERATED ALWAYS AS ( id + 3 ) stored, value char, col_todrop int);
+SELECT create_distributed_table('dist_table_with_generated_col', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- Check that dropped columns are filtered out in COPY command.
+ALTER TABLE dist_table_with_generated_col DROP COLUMN col_todrop;
+INSERT INTO dist_table_with_generated_col (id, value) (SELECT g.id, 'N' FROM generate_series(1, 1000) AS g(id));
+-- END: Create distributed table and insert data.
+-- BEGIN: Create target shards in Worker1 and Worker2 for a 2-way split copy.
+\c - - - :worker_1_port
+CREATE TABLE worker_split_copy_test.dist_table_with_generated_col_81080015(id int primary key, new_id int GENERATED ALWAYS AS ( id + 3 ) stored, value char);
+\c - - - :worker_2_port
+CREATE TABLE worker_split_copy_test.dist_table_with_generated_col_81080016(id int primary key, new_id int GENERATED ALWAYS AS ( id + 3 ) stored, value char);
+-- BEGIN: List row count for source shard and targets shard in Worker1.
+\c - - - :worker_1_port
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080000;
+ count
+---------------------------------------------------------------------
+ 510
+(1 row)
+
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080015;
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+-- BEGIN: List row count for target shard in Worker2.
+\c - - - :worker_2_port
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080016;
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+\c - - - :worker_1_port
+SELECT * from worker_split_copy(
+81080000, -- source shard id to copy
+'id',
+ARRAY[
+-- split copy info for split children 1
+ROW(81080015, -- destination shard id
+-2147483648, -- split range begin
+-1073741824, --split range end
+:worker_1_node)::pg_catalog.split_copy_info,
+-- split copy info for split children 2
+ROW(81080016, --destination shard id
+-1073741823, --split range begin
+-1, --split range end
+:worker_2_node)::pg_catalog.split_copy_info
+]
+);
+ worker_split_copy
+---------------------------------------------------------------------
+
+(1 row)
+
+\c - - - :worker_1_port
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080015;
+ count
+---------------------------------------------------------------------
+ 247
+(1 row)
+
+\c - - - :worker_2_port
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080016;
+ count
+---------------------------------------------------------------------
+ 263
+(1 row)
+
 -- BEGIN: CLEANUP.
 \c - - - :master_port
 SET client_min_messages TO WARNING;
+CALL citus_cleanup_orphaned_resources();
 DROP SCHEMA worker_split_copy_test CASCADE;
 -- END: CLEANUP.
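
On the split ranges used above: the source shard is one of two shards, so it covers the lower half of the int32 hash space, and each row is routed to the child whose range contains the hash of its distribution column; that is why the child counts, 247 and 263, sum to the source shard's 510 rows. A rough sketch of that routing, assuming Citus's worker_hash helper function is available (shard ids taken from the test above):

    SELECT id,
           worker_hash(id) AS hashed,
           CASE
             WHEN worker_hash(id) BETWEEN -2147483648 AND -1073741824 THEN 81080015
             WHEN worker_hash(id) BETWEEN -1073741823 AND -1          THEN 81080016
           END AS destination_shard
    FROM generate_series(1, 10) AS t(id);
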
@@ -67,6 +67,7 @@ test: local_shard_execution_dropped_column
 test: metadata_sync_helpers
 
 test: issue_6592
+test: executor_local_failure
 
 # test that no tests leaked intermediate results. This should always be last
 test: ensure_no_intermediate_data_leak

@@ -1,10 +1,10 @@
 test: multi_test_helpers multi_test_helpers_superuser
 test: multi_cluster_management
 test: multi_test_catalog_views
+test: worker_copy_table_to_node
 test: shard_rebalancer_unit
 test: shard_rebalancer
 test: background_rebalance
-test: worker_copy_table_to_node
 test: foreign_key_to_reference_shard_rebalance
 test: multi_move_mx
 test: shard_move_deferred_delete

@@ -1,65 +0,0 @@
-setup
-{
-    CREATE FUNCTION run_pg_send_cancellation(int,int)
-    RETURNS void
-    AS 'citus'
-    LANGUAGE C STRICT;
-
-    CREATE FUNCTION get_cancellation_key()
-    RETURNS int
-    AS 'citus'
-    LANGUAGE C STRICT;
-
-    CREATE TABLE cancel_table (pid int, cancel_key int);
-}
-
-teardown
-{
-    DROP TABLE IF EXISTS cancel_table;
-}
-
-session "s1"
-
-/* store the PID and cancellation key of session 1 */
-step "s1-register"
-{
-    INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
-}
-
-/* lock the table from session 1, will block and get cancelled */
-step "s1-lock"
-{
-    BEGIN;
-    LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
-    END;
-}
-
-session "s2"
-
-/* lock the table from session 2 to block session 1 */
-step "s2-lock"
-{
-    BEGIN;
-    LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
-}
-
-/* PID mismatch */
-step "s2-wrong-cancel-1"
-{
-    SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
-}
-
-/* cancellation key mismatch */
-step "s2-wrong-cancel-2"
-{
-    SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
-}
-
-/* cancel the LOCK statement in session 1 */
-step "s2-cancel"
-{
-    SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
-    END;
-}
-
-permutation "s1-register" "s2-lock" "s1-lock" "s2-wrong-cancel-1" "s2-wrong-cancel-2" "s2-cancel"

@@ -108,6 +108,24 @@ SELECT state, details from citus_rebalance_status();
 SELECT 1 FROM citus_remove_node('localhost', :master_port);
 SELECT public.wait_until_metadata_sync(30000);
 
+-- make sure a non-super user can rebalance when there are reference tables to replicate
+CREATE TABLE ref_table(a int primary key);
+SELECT create_reference_table('ref_table');
+
+-- add a new node to trigger replicate_reference_tables task
+SELECT 1 FROM citus_set_coordinator_host('localhost');
+SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
+
+SET ROLE non_super_user_rebalance;
+SELECT 1 FROM citus_rebalance_start(shard_transfer_mode := 'force_logical');
+
+-- wait for success
+SELECT citus_rebalance_wait();
+SELECT state, details from citus_rebalance_status();
+
+RESET ROLE;
+
 SET client_min_messages TO WARNING;
 DROP SCHEMA background_rebalance CASCADE;
 DROP USER non_super_user_rebalance;
+SELECT 1 FROM citus_remove_node('localhost', :master_port);

@@ -53,7 +53,7 @@ SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
 CREATE TABLE reference_table (measureid integer PRIMARY KEY);
 SELECT create_reference_table('reference_table');
 
-CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY);
+CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY, genid integer GENERATED ALWAYS AS ( measureid + 3 ) stored, value varchar(44), col_todrop integer);
 CLUSTER colocated_dist_table USING colocated_dist_table_pkey;
 SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors');
 
@@ -70,9 +70,11 @@ ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) RE
 
 -- BEGIN : Load data into tables.
 INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
-INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
+INSERT INTO colocated_dist_table(measureid, value, col_todrop) SELECT i,'Value',i FROM generate_series(0,1000)i;
 INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
 
+ALTER TABLE colocated_dist_table DROP COLUMN col_todrop;
+
 SELECT COUNT(*) FROM sensors;
 SELECT COUNT(*) FROM reference_table;
 SELECT COUNT(*) FROM colocated_dist_table;

@@ -49,7 +49,7 @@ SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
 CREATE TABLE reference_table (measureid integer PRIMARY KEY);
 SELECT create_reference_table('reference_table');
 
-CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY);
+CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY, genid integer GENERATED ALWAYS AS ( measureid + 3 ) stored, value varchar(44), col_todrop integer);
 CLUSTER colocated_dist_table USING colocated_dist_table_pkey;
 SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors');
 
@@ -66,9 +66,11 @@ ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) RE
 
 -- BEGIN : Load data into tables.
 INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
-INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
+INSERT INTO colocated_dist_table(measureid, value, col_todrop) SELECT i,'Value',i FROM generate_series(0,1000)i;
 INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
 
+ALTER TABLE colocated_dist_table DROP COLUMN col_todrop;
+
 SELECT COUNT(*) FROM sensors;
 SELECT COUNT(*) FROM reference_table;
 SELECT COUNT(*) FROM colocated_dist_table;

@@ -28,6 +28,14 @@ set citus.shard_replication_factor to 2;
 select create_distributed_table_concurrently('test','key', 'hash');
 set citus.shard_replication_factor to 1;

+set citus.shard_replication_factor to 2;
+create table dist_1(a int);
+select create_distributed_table('dist_1', 'a');
+set citus.shard_replication_factor to 1;
+
+create table dist_2(a int);
+select create_distributed_table_concurrently('dist_2', 'a', colocate_with=>'dist_1');
+
 begin;
 select create_distributed_table_concurrently('test','key');
 rollback;

@@ -63,6 +71,7 @@ rollback;
 -- verify that we can undistribute the table
 begin;
+set local client_min_messages to warning;
 select undistribute_table('test', cascade_via_foreign_keys := true);
 rollback;
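Reviewer note (not part of the diff): the added lines probe colocation across replication factors — dist_1 is created at shard_replication_factor 2, and the concurrent call then asks to colocate dist_2 with it at factor 1. A hedged verification sketch using the diff's own table names:

-- Sketch: inspect the resulting colocation groups after the calls above.
SELECT logicalrelid, colocationid FROM pg_dist_partition
WHERE logicalrelid IN ('dist_1'::regclass, 'dist_2'::regclass);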
@@ -0,0 +1,31 @@
+CREATE SCHEMA failure_local_modification;
+SET search_path TO failure_local_modification;
+SET citus.next_shard_id TO 1989000;
+
+SET citus.shard_replication_factor TO 1;
+CREATE TABLE failover_to_local (key int PRIMARY KEY, value varchar(10));
+SELECT create_reference_table('failover_to_local');
+
+\c - - - :worker_2_port
+
+SET search_path TO failure_local_modification;
+
+-- prevent local connection establishment, imitate
+-- a failure
+ALTER SYSTEM SET citus.local_shared_pool_size TO -1;
+SELECT pg_reload_conf();
+SELECT pg_sleep(0.2);
+BEGIN;
+-- we force the execution to use connections (e.g., remote execution)
+-- however, we do not allow connections as local_shared_pool_size=-1
+-- so, properly error out
+SET LOCAL citus.enable_local_execution TO false;
+INSERT INTO failover_to_local VALUES (1,'1'), (2,'2'),(3,'3'),(4,'4');
+ROLLBACK;
+
+ALTER SYSTEM RESET citus.local_shared_pool_size;
+SELECT pg_reload_conf();
+
+\c - - - :master_port
+SET client_min_messages TO ERROR;
+DROP SCHEMA failure_local_modification cascade;
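Reviewer note (not part of the diff): in this new failure test, citus.local_shared_pool_size = -1 disables local shard connections on the node, and SET LOCAL citus.enable_local_execution TO false removes the in-process fallback, so the INSERT has no execution path left and must error cleanly. A hedged one-line check after pg_reload_conf():

SHOW citus.local_shared_pool_size;  -- sketch: confirm the reloaded value took effect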
@@ -84,6 +84,7 @@ create table partitioned_tbl_with_fkey (x int, y int, t timestamptz default now(
 select create_distributed_table('partitioned_tbl_with_fkey','x');
 create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31');
 create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31');
+create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31');
 insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s;

 ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id);
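Reviewer note (not part of the diff): the new partition_3_with_fkey line relies on Citus distributing a freshly created partition of an already-distributed parent automatically, colocated with its siblings. A hedged verification sketch:

-- Sketch: all partitions should share the parent's colocation group.
SELECT logicalrelid, colocationid FROM pg_dist_partition
WHERE logicalrelid::text LIKE 'partition_%_with_fkey' ORDER BY 1;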
@@ -1,266 +1,235 @@
+-- This test file has an alternative output because of error messages vary for PG13
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13;
+
 CREATE SCHEMA generated_identities;
 SET search_path TO generated_identities;
 SET client_min_messages to ERROR;
+SET citus.shard_replication_factor TO 1;

 SELECT 1 from citus_add_node('localhost', :master_port, groupId=>0);

-DROP TABLE IF EXISTS generated_identities_test;
-
--- create a partitioned table for testing.
-CREATE TABLE generated_identities_test (
-a int CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY,
-b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10),
-c smallint GENERATED BY DEFAULT AS IDENTITY,
-d serial,
-e bigserial,
-f smallserial,
-g int
-)
-PARTITION BY RANGE (a);
-CREATE TABLE generated_identities_test_1_5 PARTITION OF generated_identities_test FOR VALUES FROM (1) TO (5);
-CREATE TABLE generated_identities_test_5_50 PARTITION OF generated_identities_test FOR VALUES FROM (5) TO (50);
-
--- local tables
-SELECT citus_add_local_table_to_metadata('generated_identities_test');
-
-\d generated_identities_test
-
-\c - - - :worker_1_port
-
-\d generated_identities.generated_identities_test
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-SELECT undistribute_table('generated_identities_test');
-
-SELECT citus_remove_node('localhost', :master_port);
-
-SELECT create_distributed_table('generated_identities_test', 'a');
-
-\d generated_identities_test
-
-\c - - - :worker_1_port
-
-\d generated_identities.generated_identities_test
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-insert into generated_identities_test (g) values (1);
-
-insert into generated_identities_test (g) SELECT 2;
-
-INSERT INTO generated_identities_test (g)
-SELECT s FROM generate_series(3,7) s;
-
-SELECT * FROM generated_identities_test ORDER BY 1;
-
-SELECT undistribute_table('generated_identities_test');
-
-SELECT * FROM generated_identities_test ORDER BY 1;
-
-\d generated_identities_test
-
-\c - - - :worker_1_port
-
-\d generated_identities.generated_identities_test
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-INSERT INTO generated_identities_test (g)
-SELECT s FROM generate_series(8,10) s;
-
-SELECT * FROM generated_identities_test ORDER BY 1;
-
--- distributed table
-SELECT create_distributed_table('generated_identities_test', 'a');
-
--- alter table .. alter column .. add is unsupported
-ALTER TABLE generated_identities_test ALTER COLUMN g ADD GENERATED ALWAYS AS IDENTITY;
-
--- alter table .. alter column is unsupported
-ALTER TABLE generated_identities_test ALTER COLUMN b TYPE int;
-
-SELECT alter_distributed_table('generated_identities_test', 'g');
-
-SELECT alter_distributed_table('generated_identities_test', 'b');
-
-SELECT alter_distributed_table('generated_identities_test', 'c');
-
-SELECT undistribute_table('generated_identities_test');
-
-SELECT * FROM generated_identities_test ORDER BY g;
-
--- reference table
-
-DROP TABLE generated_identities_test;
-
-CREATE TABLE generated_identities_test (
-a int GENERATED BY DEFAULT AS IDENTITY,
-b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10),
-c smallint GENERATED BY DEFAULT AS IDENTITY,
-d serial,
-e bigserial,
-f smallserial,
-g int
+-- smallint identity column can not be distributed
+CREATE TABLE smallint_identity_column (
+a smallint GENERATED BY DEFAULT AS IDENTITY
 );
+SELECT create_distributed_table('smallint_identity_column', 'a');
+SELECT create_distributed_table_concurrently('smallint_identity_column', 'a');
+SELECT create_reference_table('smallint_identity_column');
+SELECT citus_add_local_table_to_metadata('smallint_identity_column');

-SELECT create_reference_table('generated_identities_test');
+DROP TABLE smallint_identity_column;

-\d generated_identities_test
+-- int identity column can not be distributed
+CREATE TABLE int_identity_column (
+a int GENERATED BY DEFAULT AS IDENTITY
+);
+SELECT create_distributed_table('int_identity_column', 'a');
+SELECT create_distributed_table_concurrently('int_identity_column', 'a');
+SELECT create_reference_table('int_identity_column');
+SELECT citus_add_local_table_to_metadata('int_identity_column');
+DROP TABLE int_identity_column;
+RESET citus.shard_replication_factor;
+
+CREATE TABLE bigint_identity_column (
+a bigint GENERATED BY DEFAULT AS IDENTITY,
+b int
+);
+SELECT citus_add_local_table_to_metadata('bigint_identity_column');
+DROP TABLE bigint_identity_column;
+
+CREATE TABLE bigint_identity_column (
+a bigint GENERATED BY DEFAULT AS IDENTITY,
+b int
+);
+SELECT create_distributed_table('bigint_identity_column', 'a');
+
+\d bigint_identity_column

 \c - - - :worker_1_port
+SET search_path TO generated_identities;
+SET client_min_messages to ERROR;

-\d generated_identities.generated_identities_test
+INSERT INTO bigint_identity_column (b)
+SELECT s FROM generate_series(1,10) s;
+
+\d generated_identities.bigint_identity_column

 \c - - - :master_port
 SET search_path TO generated_identities;
 SET client_min_messages to ERROR;

-INSERT INTO generated_identities_test (g)
+INSERT INTO bigint_identity_column (b)
 SELECT s FROM generate_series(11,20) s;

-SELECT * FROM generated_identities_test ORDER BY g;
+SELECT * FROM bigint_identity_column ORDER BY B ASC;

-SELECT undistribute_table('generated_identities_test');
+-- table with identity column cannot be altered.
+SELECT alter_distributed_table('bigint_identity_column', 'b');

-\d generated_identities_test
+-- table with identity column cannot be undistributed.
+SELECT undistribute_table('bigint_identity_column');
+
+DROP TABLE bigint_identity_column;
+
+-- create a partitioned table for testing.
+CREATE TABLE partitioned_table (
+a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
+b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10),
+c int
+)
+PARTITION BY RANGE (c);
+CREATE TABLE partitioned_table_1_50 PARTITION OF partitioned_table FOR VALUES FROM (1) TO (50);
+CREATE TABLE partitioned_table_50_500 PARTITION OF partitioned_table FOR VALUES FROM (50) TO (1000);
+
+SELECT create_distributed_table('partitioned_table', 'a');
+
+\d partitioned_table

 \c - - - :worker_1_port
+SET search_path TO generated_identities;
+SET client_min_messages to ERROR;

-\d generated_identities.generated_identities_test
+\d generated_identities.partitioned_table
+
+insert into partitioned_table (c) values (1);
+
+insert into partitioned_table (c) SELECT 2;
+
+INSERT INTO partitioned_table (c)
+SELECT s FROM generate_series(3,7) s;

 \c - - - :master_port
 SET search_path TO generated_identities;
 SET client_min_messages to ERROR;

+INSERT INTO partitioned_table (c)
+SELECT s FROM generate_series(10,20) s;
+
+INSERT INTO partitioned_table (a,c) VALUES (998,998);
+
+INSERT INTO partitioned_table (a,b,c) OVERRIDING SYSTEM VALUE VALUES (999,999,999);
+
+SELECT * FROM partitioned_table ORDER BY c ASC;
+
+-- alter table .. alter column .. add is unsupported
+ALTER TABLE partitioned_table ALTER COLUMN g ADD GENERATED ALWAYS AS IDENTITY;
+
+-- alter table .. alter column is unsupported
+ALTER TABLE partitioned_table ALTER COLUMN b TYPE int;
+
+DROP TABLE partitioned_table;
+
+-- create a table for reference table testing.
+CREATE TABLE reference_table (
+a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
+b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10) UNIQUE,
+c int
+);
+
+SELECT create_reference_table('reference_table');
+
+\d reference_table
+
+\c - - - :worker_1_port
+SET search_path TO generated_identities;
+
+\d generated_identities.reference_table
+
+INSERT INTO reference_table (c)
+SELECT s FROM generate_series(1,10) s;
+
+--on master
+select * from reference_table;
+
+\c - - - :master_port
+SET search_path TO generated_identities;
+SET client_min_messages to ERROR;
+
+INSERT INTO reference_table (c)
+SELECT s FROM generate_series(11,20) s;
+
+SELECT * FROM reference_table ORDER BY c ASC;
+
+DROP TABLE reference_table;
+
+CREATE TABLE color (
+color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
+color_name VARCHAR NOT NULL
+);
+
+-- https://github.com/citusdata/citus/issues/6694
+CREATE USER identity_test_user;
+GRANT INSERT ON color TO identity_test_user;
+GRANT USAGE ON SCHEMA generated_identities TO identity_test_user;
+
+SET ROLE identity_test_user;
+SELECT create_distributed_table('color', 'color_id');
+
+SET ROLE postgres;
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table_concurrently('color', 'color_id');
+RESET citus.shard_replication_factor;
+
+\c - identity_test_user - :worker_1_port
+SET search_path TO generated_identities;
+SET client_min_messages to ERROR;
+
+INSERT INTO color(color_name) VALUES ('Blue');
+
+\c - postgres - :master_port
+SET search_path TO generated_identities;
+SET client_min_messages to ERROR;
+SET citus.next_shard_id TO 12400000;
+
+DROP TABLE Color;
+CREATE TABLE color (
+color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
+color_name VARCHAR NOT NULL
+) USING columnar;
+SELECT create_distributed_table('color', 'color_id');
+INSERT INTO color(color_name) VALUES ('Blue');
+\d+ color
+
+\c - - - :worker_1_port
+SET search_path TO generated_identities;
+\d+ color
+INSERT INTO color(color_name) VALUES ('Red');
+-- alter sequence .. restart
+ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
+-- override system value
+INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
+INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
+INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
+-- update null or custom value
+UPDATE color SET color_id = NULL;
+UPDATE color SET color_id = 1;
+
+\c - postgres - :master_port
+SET search_path TO generated_identities;
+SET client_min_messages to ERROR;

 -- alter table .. add column .. GENERATED .. AS IDENTITY
-DROP TABLE IF EXISTS color;
-CREATE TABLE color (
-color_name VARCHAR NOT NULL
-);
-SELECT create_distributed_table('color', 'color_name');
 ALTER TABLE color ADD COLUMN color_id BIGINT GENERATED ALWAYS AS IDENTITY;
-INSERT INTO color(color_name) VALUES ('Red');
-ALTER TABLE color ADD COLUMN color_id_1 BIGINT GENERATED ALWAYS AS IDENTITY;
-DROP TABLE color;
-
--- insert data from workers
-CREATE TABLE color (
-color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
-color_name VARCHAR NOT NULL
-);
-SELECT create_distributed_table('color', 'color_id');
-
-\c - - - :worker_1_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-INSERT INTO color(color_name) VALUES ('Red');
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-SELECT undistribute_table('color');
-SELECT create_distributed_table('color', 'color_id');
-
-\c - - - :worker_1_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-INSERT INTO color(color_name) VALUES ('Red');
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-INSERT INTO color(color_name) VALUES ('Red');
-
-SELECT count(*) from color;
-
--- modify sequence & alter table
-DROP TABLE color;
-
-CREATE TABLE color (
-color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
-color_name VARCHAR NOT NULL
-);
-SELECT create_distributed_table('color', 'color_id');
-
-\c - - - :worker_1_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-INSERT INTO color(color_name) VALUES ('Red');
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-SELECT undistribute_table('color');
-
-ALTER SEQUENCE color_color_id_seq RENAME TO myseq;
-
-SELECT create_distributed_table('color', 'color_id');
-\ds+ myseq
-\ds+ color_color_id_seq
-\d color
-
-\c - - - :worker_1_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-\ds+ myseq
-\ds+ color_color_id_seq
-\d color
-
-INSERT INTO color(color_name) VALUES ('Red');
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-ALTER SEQUENCE myseq RENAME TO color_color_id_seq;
-
-\ds+ myseq
-\ds+ color_color_id_seq
-
-INSERT INTO color(color_name) VALUES ('Red');
-
-\c - - - :worker_1_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-\ds+ myseq
-\ds+ color_color_id_seq
-\d color
-
-INSERT INTO color(color_name) VALUES ('Red');
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-SELECT alter_distributed_table('co23423lor', shard_count := 6);
-
-INSERT INTO color(color_name) VALUES ('Red');
-
-\c - - - :worker_1_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
-
-\ds+ color_color_id_seq
-
-INSERT INTO color(color_name) VALUES ('Red');
-
-\c - - - :master_port
-SET search_path TO generated_identities;
-SET client_min_messages to ERROR;
+-- alter sequence .. restart
+ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
+-- override system value
+INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
+INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
+INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
+-- update null or custom value
+UPDATE color SET color_id = NULL;
+UPDATE color SET color_id = 1;
+
+DROP TABLE IF EXISTS test;
+CREATE TABLE test (x int, y int, z bigint generated by default as identity);
+SELECT create_distributed_table('test', 'x', colocate_with := 'none');
+INSERT INTO test VALUES (1,2);
+INSERT INTO test SELECT x, y FROM test WHERE x = 1;
+SELECT * FROM test;

 DROP SCHEMA generated_identities CASCADE;
+DROP USER identity_test_user;
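Reviewer note (not part of the diff): the rewritten test encodes a type rule — the smallint and int identity variants are run through create_distributed_table, create_distributed_table_concurrently, create_reference_table, and citus_add_local_table_to_metadata with errors expected in the distributed cases, while the bigint variant succeeds. A plausible reading (hedged): generated values must stay unique across nodes, and only bigint is wide enough for Citus to hand each node a disjoint range. Minimal sketch with hypothetical table names:

CREATE TABLE ok_tbl (a bigint GENERATED BY DEFAULT AS IDENTITY, b int);
SELECT create_distributed_table('ok_tbl', 'a');   -- accepted: bigint identity
CREATE TABLE bad_tbl (a int GENERATED BY DEFAULT AS IDENTITY);
SELECT create_distributed_table('bad_tbl', 'a');  -- rejected: int identity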
@@ -667,5 +667,33 @@ ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_sch
 SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass);
 SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace);

+-- Bug: https://github.com/citusdata/citus/issues/7378
+
+-- Create a reference table
+CREATE TABLE tbl_ref_mats(row_id integer primary key);
+INSERT INTO tbl_ref_mats VALUES (1), (2);
+SELECT create_reference_table('tbl_ref_mats');
+
+-- Create a distributed table
+CREATE TABLE tbl_dist_mats(series_id integer);
+INSERT INTO tbl_dist_mats VALUES (1), (1), (2), (2);
+SELECT create_distributed_table('tbl_dist_mats', 'series_id');
+
+-- Create a view that joins the distributed table with the reference table on the distribution key.
+CREATE VIEW vw_citus_views as
+SELECT d.series_id FROM tbl_dist_mats d JOIN tbl_ref_mats r ON d.series_id = r.row_id;
+
+-- The view initially works fine
+SELECT * FROM vw_citus_views ORDER BY 1;
+-- Now, alter the table
+ALTER TABLE tbl_ref_mats ADD COLUMN category1 varchar(50);
+SELECT * FROM vw_citus_views ORDER BY 1;
+ALTER TABLE tbl_ref_mats ADD COLUMN category2 varchar(50);
+SELECT * FROM vw_citus_views ORDER BY 1;
+ALTER TABLE tbl_ref_mats DROP COLUMN category1;
+SELECT * FROM vw_citus_views ORDER BY 1;
+
 DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
 DROP TABLE table_without_sequence;
+DROP TABLE tbl_ref_mats CASCADE;
+DROP TABLE tbl_dist_mats CASCADE;
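Reviewer note (not part of the diff): the new block reproduces issue #7378 — a view joining a distributed table to a reference table kept working only until the reference table was altered, so the test re-runs the same SELECT after each ADD/DROP COLUMN to pin the expected behavior. Hedged sketch of the shape of the regression, with hypothetical names:

CREATE TABLE ref_t(id int PRIMARY KEY);
SELECT create_reference_table('ref_t');
CREATE TABLE dist_t(id int);
SELECT create_distributed_table('dist_t', 'id');
CREATE VIEW v AS SELECT d.id FROM dist_t d JOIN ref_t r ON d.id = r.id;
ALTER TABLE ref_t ADD COLUMN note varchar(50);
SELECT * FROM v ORDER BY 1;  -- must still work after the ALTER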
@@ -563,11 +563,17 @@ RESET client_min_messages;
 SELECT * FROM multi_extension.print_extension_changes();

-DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-
 -- show running version
 SHOW citus.version;

+-- Snapshot of state at 11.2-2
+ALTER EXTENSION citus UPDATE TO '11.2-2';
+
+SELECT * FROM multi_extension.print_extension_changes();
+
+-- Test downgrade to 11.2-1 from 11.2-2
+ALTER EXTENSION citus UPDATE TO '11.2-1';
+
 -- ensure no unexpected objects were created outside pg_catalog
 SELECT pgio.type, pgio.identity
 FROM pg_depend AS pgd,

@@ -579,6 +585,8 @@ WHERE pgd.refclassid = 'pg_extension'::regclass AND
 pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar', 'columnar_internal')
 ORDER BY 1, 2;

+DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
+
 -- see incompatible version errors out
 RESET citus.enable_version_checks;
 RESET columnar.enable_version_checks;
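Reviewer note (not part of the diff): the relocated DROP TABLE plus the new ALTER EXTENSION pair give the test a snapshot-and-downgrade cycle for the 11.2-2 catalog version. Generic sketch of the pattern, using the versions visible in the diff:

ALTER EXTENSION citus UPDATE TO '11.2-2';  -- step up
ALTER EXTENSION citus UPDATE TO '11.2-1';  -- step back down
SELECT extversion FROM pg_extension WHERE extname = 'citus';  -- sketch: confirm the final version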
@@ -151,8 +151,34 @@ ORDER BY
 shardid
 LIMIT 1 OFFSET 1;

+-- Check that shards of a table with GENERATED columns can be moved.
+\c - - - :master_port
+SET citus.shard_count TO 4;
+SET citus.shard_replication_factor TO 1;
+
+CREATE TABLE mx_table_with_generated_column (a int, b int GENERATED ALWAYS AS ( a + 3 ) STORED, c int);
+SELECT create_distributed_table('mx_table_with_generated_column', 'a');
+
+-- Check that dropped columns are handled properly in a move.
+ALTER TABLE mx_table_with_generated_column DROP COLUMN c;
+
+-- Move a shard from worker 1 to worker 2
+SELECT
+citus_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical')
+FROM
+pg_dist_shard NATURAL JOIN pg_dist_shard_placement
+WHERE
+logicalrelid = 'mx_table_with_generated_column'::regclass
+AND nodeport = :worker_1_port
+ORDER BY
+shardid
+LIMIT 1;
+
 -- Cleanup
 \c - - - :master_port
+SET client_min_messages TO WARNING;
+CALL citus_cleanup_orphaned_resources();
+DROP TABLE mx_table_with_generated_column;
 DROP TABLE mx_table_1;
 DROP TABLE mx_table_2;
 DROP TABLE mx_table_3;
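Reviewer note (not part of the diff): after the added citus_move_shard_placement call, exactly one shard of mx_table_with_generated_column should report worker 2 as its placement. A hedged verification sketch:

SELECT shardid, nodename, nodeport
FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid = 'mx_table_with_generated_column'::regclass
ORDER BY shardid;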
@@ -802,6 +802,9 @@ ALTER TABLE IF EXISTS non_existent_table SET SCHEMA non_existent_schema;
 DROP SCHEMA existing_schema, another_existing_schema CASCADE;

+-- test DROP SCHEMA with nonexisting schemas
+DROP SCHEMA ax, bx, cx, dx, ex, fx, gx, jx;
+
 -- test ALTER TABLE SET SCHEMA with interesting names
 CREATE SCHEMA "cItuS.T E E N'sSchema";
 CREATE SCHEMA "citus-teen's scnd schm.";

@@ -968,6 +971,7 @@ SET client_min_messages TO WARNING;
 SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object
 WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema');
+DROP TABLE public.nation_local;
 DROP SCHEMA run_test_schema, test_schema_support_join_1, test_schema_support_join_2, "Citus'Teen123", "CiTUS.TEEN2", bar, test_schema_support CASCADE;
 -- verify that the dropped schema is removed from worker's pg_dist_object
 SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object
@@ -137,6 +137,30 @@ FETCH FORWARD 3 FROM holdCursor;

 CLOSE holdCursor;

+-- Test DECLARE CURSOR .. WITH HOLD inside transaction block
+BEGIN;
+DECLARE holdCursor CURSOR WITH HOLD FOR
+SELECT * FROM cursor_me WHERE x = 1 ORDER BY y;
+FETCH 3 FROM holdCursor;
+FETCH BACKWARD 3 FROM holdCursor;
+FETCH FORWARD 3 FROM holdCursor;
+COMMIT;
+
+FETCH 3 FROM holdCursor;
+CLOSE holdCursor;
+
+-- Test DECLARE NO SCROLL CURSOR .. WITH HOLD inside transaction block
+BEGIN;
+DECLARE holdCursor NO SCROLL CURSOR WITH HOLD FOR
+SELECT * FROM cursor_me WHERE x = 1 ORDER BY y;
+FETCH 3 FROM holdCursor;
+FETCH FORWARD 3 FROM holdCursor;
+COMMIT;
+
+FETCH 3 FROM holdCursor;
+FETCH BACKWARD 3 FROM holdCursor;
+CLOSE holdCursor;
+
 -- Test DECLARE CURSOR .. WITH HOLD with parameter
 CREATE OR REPLACE FUNCTION declares_cursor(p int)
 RETURNS void AS $$
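Reviewer note (not part of the diff): the new blocks pin down WITH HOLD semantics inside explicit transactions — a held cursor is materialized at COMMIT and stays fetchable afterwards, and the NO SCROLL variant additionally forbids FETCH BACKWARD once held. Minimal sketch with a hypothetical cursor name:

BEGIN;
DECLARE c CURSOR WITH HOLD FOR SELECT generate_series(1, 5);
COMMIT;          -- the cursor survives the commit because of WITH HOLD
FETCH 2 FROM c;  -- still valid outside the transaction
CLOSE c;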
@@ -110,8 +110,66 @@ SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_810700
 SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
 -- END: List updated row count for local targets shard.

+-- Check that GENERATED columns are handled properly in a shard split operation.
+\c - - - :master_port
+SET search_path TO worker_split_copy_test;
+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 81080000;
+
+-- BEGIN: Create distributed table and insert data.
+CREATE TABLE worker_split_copy_test.dist_table_with_generated_col(id int primary key, new_id int GENERATED ALWAYS AS ( id + 3 ) stored, value char, col_todrop int);
+SELECT create_distributed_table('dist_table_with_generated_col', 'id');
+
+-- Check that dropped columns are filtered out in COPY command.
+ALTER TABLE dist_table_with_generated_col DROP COLUMN col_todrop;
+
+INSERT INTO dist_table_with_generated_col (id, value) (SELECT g.id, 'N' FROM generate_series(1, 1000) AS g(id));
+
+-- END: Create distributed table and insert data.
+
+-- BEGIN: Create target shards in Worker1 and Worker2 for a 2-way split copy.
+\c - - - :worker_1_port
+CREATE TABLE worker_split_copy_test.dist_table_with_generated_col_81080015(id int primary key, new_id int GENERATED ALWAYS AS ( id + 3 ) stored, value char);
+\c - - - :worker_2_port
+CREATE TABLE worker_split_copy_test.dist_table_with_generated_col_81080016(id int primary key, new_id int GENERATED ALWAYS AS ( id + 3 ) stored, value char);
+
+-- BEGIN: List row count for source shard and targets shard in Worker1.
+\c - - - :worker_1_port
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080000;
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080015;
+
+-- BEGIN: List row count for target shard in Worker2.
+\c - - - :worker_2_port
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080016;
+
+\c - - - :worker_1_port
+SELECT * from worker_split_copy(
+81080000, -- source shard id to copy
+'id',
+ARRAY[
+-- split copy info for split children 1
+ROW(81080015, -- destination shard id
+-2147483648, -- split range begin
+-1073741824, --split range end
+:worker_1_node)::pg_catalog.split_copy_info,
+-- split copy info for split children 2
+ROW(81080016, --destination shard id
+-1073741823, --split range begin
+-1, --split range end
+:worker_2_node)::pg_catalog.split_copy_info
+]
+);
+
+\c - - - :worker_1_port
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080015;
+
+\c - - - :worker_2_port
+SELECT COUNT(*) FROM worker_split_copy_test.dist_table_with_generated_col_81080016;
+
 -- BEGIN: CLEANUP.
 \c - - - :master_port
 SET client_min_messages TO WARNING;
+CALL citus_cleanup_orphaned_resources();
 DROP SCHEMA worker_split_copy_test CASCADE;
 -- END: CLEANUP.
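Reviewer note (not part of the diff): in the worker_split_copy call above, the two split_copy_info rows carve the source shard's hash-token interval [-2147483648, -1] into two adjacent, non-overlapping child ranges. A small arithmetic sketch confirming they tile the interval exactly:

SELECT (-1073741824)::bigint - (-2147483648)::bigint + 1 AS child_81080015_tokens,
       (-1)::bigint - (-1073741823)::bigint + 1 AS child_81080016_tokens;
-- 1073741825 + 1073741823 = 2147483648 tokens, the full [-2147483648, -1] range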