mirror of https://github.com/citusdata/citus.git
Compare commits
28 Commits
SHA1:

cd1e706acb
3d4d76fdde
45671a1caa
1eec630640
5dc2fae9d6
0f498ac26d
44459be1ab
8401acb761
26556b2bba
7480160f4f
23951c562e
51560f9644
9f27e398a9
5bb4bb4b5f
a7ff0c5800
6703b173a0
2efeed412a
49ce36fe8b
043c3356ae
a603ad9cbf
4a1255fd10
67004edf43
789d441296
6d06e9760a
74f0dd0c25
e777daad22
4e373fadd8
35703d5e61
.circleci/config.yml

@@ -5,47 +5,32 @@ orbs:
jobs:
  build-11:
  build:
    description: Build the citus extension
    parameters:
      pg_major:
        description: postgres major version building citus for
        type: integer
      image:
        description: docker image to use for the build
        type: string
        default: citus/extbuilder
      image_tag:
        description: tag to use for the docker image
        type: string
    docker:
      - image: 'citus/extbuilder:11.9'
      - image: '<< parameters.image >>:<< parameters.image_tag >>'
    steps:
      - checkout
      - run:
          name: 'Configure, Build, and Install'
          command: build-ext
          command: |
            ./ci/build-citus.sh
      - persist_to_workspace:
          root: .
          paths:
            - build-11/*
            - install-11.tar

  build-12:
    docker:
      - image: 'citus/extbuilder:12.4'
    steps:
      - checkout
      - run:
          name: 'Configure, Build, and Install'
          command: build-ext
      - persist_to_workspace:
          root: .
          paths:
            - build-12/*
            - install-12.tar

  build-13:
    docker:
      - image: 'citus/extbuilder:13.0'
    steps:
      - checkout
      - run:
          name: 'Configure, Build, and Install'
          command: build-ext
      - persist_to_workspace:
          root: .
          paths:
            - build-13/*
            - install-13.tar
            - build-<< parameters.pg_major >>/*
            - install-<< parameters.pg_major >>.tar

  check-style:
    docker:
@@ -91,6 +76,7 @@ jobs:
      - run:
          name: 'Check if all CI scripts are actually run'
          command: ci/check_all_ci_scripts_are_run.sh

  check-sql-snapshots:
    docker:
      - image: 'citus/extbuilder:latest'
@@ -99,392 +85,230 @@ jobs:
      - run:
          name: 'Check Snapshots'
          command: ci/check_sql_snapshots.sh

  test-11_check-multi:
  test-pg-upgrade:
    description: Runs postgres upgrade tests
    parameters:
      old_pg_major:
        description: 'postgres major version to use before the upgrade'
        type: integer
      new_pg_major:
        description: 'postgres major version to upgrade to'
        type: integer
      image:
        description: 'docker image to use for the tests'
        type: string
        default: citus/pgupgradetester
      image_tag:
        description: 'docker image tag to use'
        type: string
        default: latest
    docker:
      - image: 'citus/exttester:11.9'
      - image: '<< parameters.image >>:<< parameters.image_tag >>'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-multi)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-multi'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_11,multi'

  test-11_check-vanilla:
    docker:
      - image: 'citus/exttester:11.9'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
          name: 'Install Extension'
          command: |
            tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-<< parameters.old_pg_major >>.tar" --directory /
            tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-<< parameters.new_pg_major >>.tar" --directory /
      - run:
          name: 'Install and Test (check-vanilla)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-vanilla'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_11,vanilla'

  test-11_check-mx:
    docker:
      - image: 'citus/exttester:11.9'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-mx)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-multi-mx'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_11,mx'

  test-11_check-worker:
    docker:
      - image: 'citus/exttester:11.9'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-worker)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-worker'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_11,worker'

  test-11_check-isolation:
    docker:
      - image: 'citus/exttester:11.9'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-isolation)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-isolation'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_11,isolation'

  test-11_check-follower-cluster:
    docker:
      - image: 'citus/exttester:11.9'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
          name: 'Configure'
          command: |
            chown -R circleci .
            gosu circleci ./configure
      - run:
          name: 'Enable core dumps'
          command: 'ulimit -c unlimited'
          command: |
            ulimit -c unlimited
      - run:
          name: 'Install and Test (follower-cluster)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-follower-cluster'
          name: 'Install and test postgres upgrade'
          command: |
            gosu circleci \
              make -C src/test/regress \
                check-pg-upgrade \
                old-bindir=/usr/lib/postgresql/<< parameters.old_pg_major >>/bin \
                new-bindir=/usr/lib/postgresql/<< parameters.new_pg_major >>/bin
          no_output_timeout: 2m
      - run:
          name: 'Regressions'
          command: |
            if [ -f "src/test/regress/regression.diffs" ]; then
              cat src/test/regress/regression.diffs
              exit 1
            fi
          when: on_fail
      - run:
          name: 'Copy coredumps'
          command: |
            mkdir -p /tmp/core_dumps
            cp core.* /tmp/core_dumps
            if ls core.* 1> /dev/null 2>&1; then
              cp core.* /tmp/core_dumps
            fi
          when: on_fail
      - store_artifacts:
          name: 'Save regressions'
          path: src/test/regress/regression.diffs
          when: on_fail
      - store_artifacts:
          name: 'Save core dumps'
          path: /tmp/core_dumps
          when: on_fail
      - codecov/upload:
          flags: 'test_11,follower-cluster'
      - store_artifacts:
          path: '/tmp/core_dumps'

  test-11_check-failure:
          flags: 'test_<< parameters.old_pg_major >>_<< parameters.new_pg_major >>,upgrade'

  test-citus-upgrade:
    description: Runs citus upgrade tests
    parameters:
      pg_major:
        description: "postgres major version"
        type: integer
      image:
        description: 'docker image to use for the tests'
        type: string
        default: citus/citusupgradetester
      image_tag:
        description: 'docker image tag to use'
        type: string
    docker:
      - image: 'citus/failtester:11.9'
      - image: '<< parameters.image >>:<< parameters.image_tag >>'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-failure)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-failure'
          no_output_timeout: 2m

  test-11-12_check-pg-upgrade:
    docker:
      - image: 'citus/pgupgradetester:latest'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and test postgres upgrade'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext --target check-pg-upgrade --old-pg-version 11 --new-pg-version 12'
          no_output_timeout: 2m

  test-12-13_check-pg-upgrade:
    docker:
      - image: 'citus/pgupgradetester:latest'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and test postgres upgrade'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext --target check-pg-upgrade --old-pg-version 12 --new-pg-version 13'
          no_output_timeout: 2m

  test-12_check-multi:
    docker:
      - image: 'citus/exttester:12.4'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-multi)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-multi'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_12,multi'

  test-12_check-vanilla:
    docker:
      - image: 'citus/exttester:12.4'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-vanilla)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-vanilla'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_12,vanilla'

  test-12_check-mx:
    docker:
      - image: 'citus/exttester:12.4'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-mx)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-multi-mx'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_12,mx'

  test-12_check-isolation:
    docker:
      - image: 'citus/exttester:12.4'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-isolation)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-isolation'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_12,isolation'

  test-12_check-worker:
    docker:
      - image: 'citus/exttester:12.4'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-worker)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-worker'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_12,worker'

  test-12_check-follower-cluster:
    docker:
      - image: 'citus/exttester:12.4'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
          name: 'Configure'
          command: |
            chown -R circleci .
            gosu circleci ./configure
      - run:
          name: 'Enable core dumps'
          command: 'ulimit -c unlimited'
      - run:
          name: 'Install and Test (follower-cluster)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-follower-cluster'
          no_output_timeout: 2m
      - run:
          command: |
            mkdir -p /tmp/core_dumps
            cp core.* /tmp/core_dumps
          when: on_fail
      - codecov/upload:
          flags: 'test_12,follower-cluster'
      - store_artifacts:
          path: '/tmp/core_dumps'

  test-12_check-failure:
    docker:
      - image: 'citus/failtester:12.4'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-failure)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-failure'
          no_output_timeout: 2m

  test-11_check-citus-upgrade:
    docker:
      - image: 'citus/citusupgradetester:11.9'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
            ulimit -c unlimited
      - run:
          name: 'Install and test citus upgrade'
          command: |
            chown -R circleci:circleci /home/circleci
            install-and-test-ext --target check-citus-upgrade --citus-pre-tar /install-pg11-citusv8.0.0.tar
            install-and-test-ext --target check-citus-upgrade --citus-pre-tar /install-pg11-citusv8.1.0.tar
            install-and-test-ext --target check-citus-upgrade --citus-pre-tar /install-pg11-citusv8.2.0.tar
            install-and-test-ext --target check-citus-upgrade --citus-pre-tar /install-pg11-citusv8.3.0.tar
            # run make check-citus-upgrade for all citus versions
            # the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
            for citus_version in ${CITUS_VERSIONS}; do \
              gosu circleci \
                make -C src/test/regress \
                  check-citus-upgrade \
                  bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
                  citus-pre-tar=/install-pg11-citus${citus_version}.tar \
                  citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \
            done;

            install-and-test-ext --target check-citus-upgrade-mixed --citus-pre-tar /install-pg11-citusv8.0.0.tar
            install-and-test-ext --target check-citus-upgrade-mixed --citus-pre-tar /install-pg11-citusv8.1.0.tar
            install-and-test-ext --target check-citus-upgrade-mixed --citus-pre-tar /install-pg11-citusv8.2.0.tar
            install-and-test-ext --target check-citus-upgrade-mixed --citus-pre-tar /install-pg11-citusv8.3.0.tar
          no_output_timeout: 2m

  test-13_check-multi:
    docker:
      - image: 'citus/exttester:13.0'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-multi)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-multi'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_13,multi'

  test-13_check-mx:
    docker:
      - image: 'citus/exttester:13.0'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-mx)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-multi-mx'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_13,mx'

  test-13_check-vanilla:
    docker:
      - image: 'citus/exttester:13.0'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-vanilla)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-vanilla'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_13,vanilla'

  test-13_check-worker:
    docker:
      - image: 'citus/exttester:13.0'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-worker)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-worker'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_13,worker'

  test-13_check-isolation:
    docker:
      - image: 'citus/exttester:13.0'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-isolation)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-isolation'
          no_output_timeout: 2m
      - codecov/upload:
          flags: 'test_13,isolation'

  test-13_check-follower-cluster:
    docker:
      - image: 'citus/exttester:13.0'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Enable core dumps'
          command: 'ulimit -c unlimited'
      - run:
          name: 'Install and Test (follower-cluster)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-follower-cluster'
            # run make check-citus-upgrade-mixed for all citus versions
            # the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
            for citus_version in ${CITUS_VERSIONS}; do \
              gosu circleci \
                make -C src/test/regress \
                  check-citus-upgrade-mixed \
                  bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
                  citus-pre-tar=/install-pg11-citus${citus_version}.tar \
                  citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \
            done;
          no_output_timeout: 2m
      - run:
          name: 'Regressions'
          command: |
            if [ -f "src/test/regress/regression.diffs" ]; then
              cat src/test/regress/regression.diffs
              exit 1
            fi
          when: on_fail
      - run:
          name: 'Copy coredumps'
          command: |
            mkdir -p /tmp/core_dumps
            cp core.* /tmp/core_dumps
            if ls core.* 1> /dev/null 2>&1; then
              cp core.* /tmp/core_dumps
            fi
          when: on_fail
      - store_artifacts:
          name: 'Save regressions'
          path: src/test/regress/regression.diffs
          when: on_fail
      - store_artifacts:
          name: 'Save core dumps'
          path: /tmp/core_dumps
          when: on_fail
      - codecov/upload:
          flags: 'test_13,follower-cluster'
      - store_artifacts:
          path: '/tmp/core_dumps'
          flags: 'test_<< parameters.pg_major >>,upgrade'

  test-13_check-failure:
  test-citus:
    description: Runs the common tests of citus
    parameters:
      pg_major:
        description: "postgres major version"
        type: integer
      image:
        description: 'docker image to use for the tests'
        type: string
        default: citus/exttester
      image_tag:
        description: 'docker image tag to use'
        type: string
      make:
        description: "make target"
        type: string
    docker:
      - image: 'citus/failtester:13.0'
      - image: '<< parameters.image >>:<< parameters.image_tag >>'
    working_directory: /home/circleci/project
    steps:
      - checkout
      - attach_workspace:
          at: .
      - run:
          name: 'Install and Test (check-failure)'
          command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-failure'
          name: 'Install Extension'
          command: |
            tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-${PG_MAJOR}.tar" --directory /
      - run:
          name: 'Configure'
          command: |
            chown -R circleci .
            gosu circleci ./configure
      - run:
          name: 'Enable core dumps'
          command: |
            ulimit -c unlimited
      - run:
          name: 'Run Test'
          command: |
            gosu circleci make -C src/test/regress << parameters.make >>
          no_output_timeout: 2m
      - run:
          name: 'Regressions'
          command: |
            if [ -f "src/test/regress/regression.diffs" ]; then
              cat src/test/regress/regression.diffs
              exit 1
            fi
          when: on_fail
      - run:
          name: 'Copy coredumps'
          command: |
            mkdir -p /tmp/core_dumps
            if ls core.* 1> /dev/null 2>&1; then
              cp core.* /tmp/core_dumps
            fi
          when: on_fail
      - store_artifacts:
          name: 'Save regressions'
          path: src/test/regress/regression.diffs
          when: on_fail
      - store_artifacts:
          name: 'Save core dumps'
          path: /tmp/core_dumps
          when: on_fail
      - codecov/upload:
          flags: 'test_<< parameters.pg_major >>,<< parameters.make >>'
          when: always

  check-merge-to-enterprise:
    docker:
@@ -495,6 +319,7 @@ jobs:
      - run:
          command: |
            ci/check_enterprise_merge.sh

  ch_benchmark:
    docker:
      - image: buildpack-deps:stretch
@@ -509,6 +334,7 @@ jobs:
            sh run_hammerdb.sh citusbot_ch_benchmark_rg
          name: install dependencies and run ch_benchmark tests
          no_output_timeout: 20m

  tpcc_benchmark:
    docker:
      - image: buildpack-deps:stretch
@@ -524,7 +350,6 @@ jobs:
          name: install dependencies and run ch_benchmark tests
          no_output_timeout: 20m

workflows:
  version: 2
  build_and_test:
@@ -536,70 +361,173 @@ workflows:
            ignore:
              - /release-[0-9]+\.[0-9]+.*/ # match with releaseX.Y.*

      - build-11
      - build-12
      - build-13
      - build:
          name: build-11
          pg_major: 11
          image_tag: '11.9'
      - build:
          name: build-12
          pg_major: 12
          image_tag: '12.4'
      - build:
          name: build-13
          pg_major: 13
          image_tag: '13.0'

      - check-style
      - check-sql-snapshots

      - test-11_check-multi:
      - test-citus:
          name: 'test-11_check-multi'
          pg_major: 11
          image_tag: '11.9'
          make: check-multi
          requires: [build-11]
      - test-11_check-vanilla:
      - test-citus:
          name: 'test-11_check-mx'
          pg_major: 11
          image_tag: '11.9'
          make: check-multi-mx
          requires: [build-11]
      - test-11_check-isolation:
      - test-citus:
          name: 'test-11_check-vanilla'
          pg_major: 11
          image_tag: '11.9'
          make: check-vanilla
          requires: [build-11]
      - test-11_check-mx:
      - test-citus:
          name: 'test-11_check-isolation'
          pg_major: 11
          image_tag: '11.9'
          make: check-isolation
          requires: [build-11]
      - test-11_check-worker:
      - test-citus:
          name: 'test-11_check-worker'
          pg_major: 11
          image_tag: '11.9'
          make: check-worker
          requires: [build-11]
      - test-11_check-follower-cluster:
      - test-citus:
          name: 'test-11_check-follower-cluster'
          pg_major: 11
          image_tag: '11.9'
          make: check-follower-cluster
          requires: [build-11]
      - test-11_check-failure:
      - test-citus:
          name: 'test-11_check-failure'
          pg_major: 11
          image: citus/failtester
          image_tag: '11.9'
          make: check-failure
          requires: [build-11]

      - test-12_check-multi:
      - test-citus:
          name: 'test-12_check-multi'
          pg_major: 12
          image_tag: '12.4'
          make: check-multi
          requires: [build-12]
      - test-12_check-vanilla:
      - test-citus:
          name: 'test-12_check-mx'
          pg_major: 12
          image_tag: '12.4'
          make: check-multi-mx
          requires: [build-12]
      - test-12_check-isolation:
      - test-citus:
          name: 'test-12_check-vanilla'
          pg_major: 12
          image_tag: '12.4'
          make: check-vanilla
          requires: [build-12]
      - test-12_check-mx:
      - test-citus:
          name: 'test-12_check-isolation'
          pg_major: 12
          image_tag: '12.4'
          make: check-isolation
          requires: [build-12]
      - test-12_check-worker:
      - test-citus:
          name: 'test-12_check-worker'
          pg_major: 12
          image_tag: '12.4'
          make: check-worker
          requires: [build-12]
      - test-12_check-follower-cluster:
      - test-citus:
          name: 'test-12_check-follower-cluster'
          pg_major: 12
          image_tag: '12.4'
          make: check-follower-cluster
          requires: [build-12]
      - test-12_check-failure:
      - test-citus:
          name: 'test-12_check-failure'
          pg_major: 12
          image: citus/failtester
          image_tag: '12.4'
          make: check-failure
          requires: [build-12]

      - test-13_check-multi:
      - test-citus:
          name: 'test-13_check-multi'
          pg_major: 13
          image_tag: '13.0'
          make: check-multi
          requires: [build-13]
      - test-13_check-vanilla:
      - test-citus:
          name: 'test-13_check-mx'
          pg_major: 13
          image_tag: '13.0'
          make: check-multi-mx
          requires: [build-13]
      - test-13_check-isolation:
      - test-citus:
          name: 'test-13_check-vanilla'
          pg_major: 13
          image_tag: '13.0'
          make: check-vanilla
          requires: [build-13]
      - test-13_check-mx:
      - test-citus:
          name: 'test-13_check-isolation'
          pg_major: 13
          image_tag: '13.0'
          make: check-isolation
          requires: [build-13]
      - test-13_check-worker:
      - test-citus:
          name: 'test-13_check-worker'
          pg_major: 13
          image_tag: '13.0'
          make: check-worker
          requires: [build-13]
      - test-13_check-follower-cluster:
          requires: [build-13]
      - test-13_check-failure:
      - test-citus:
          name: 'test-13_check-follower-cluster'
          pg_major: 13
          image_tag: '13.0'
          make: check-follower-cluster
          requires: [build-13]

      - test-11-12_check-pg-upgrade:
          requires:
            - build-11
            - build-12
      - test-citus:
          name: 'test-13_check-failure'
          pg_major: 13
          image: citus/failtester
          image_tag: '13.0'
          make: check-failure
          requires: [build-13]

      - test-12-13_check-pg-upgrade:
          requires:
            - build-12
            - build-13
      - test-pg-upgrade:
          name: 'test-11-12_check-pg-upgrade'
          old_pg_major: 11
          new_pg_major: 12
          image_tag: latest
          requires: [build-11,build-12]

      - test-11_check-citus-upgrade:
      - test-pg-upgrade:
          name: 'test-12-13_check-pg-upgrade'
          old_pg_major: 12
          new_pg_major: 13
          image_tag: latest
          requires: [build-12,build-13]

      - test-citus-upgrade:
          name: test-11_check-citus-upgrade
          pg_major: 11
          image_tag: '11.9'
          requires: [build-11]

      - ch_benchmark:

CHANGELOG.md

@@ -1,3 +1,128 @@
### citus v9.5.4 (February 19, 2021) ###

* Fixes a compatibility issue with pg_audit in utility calls

### citus v9.5.3 (February 16, 2021) ###

* Avoids re-using connections for intermediate results

* Fixes a bug that might cause self-deadlocks when `COPY` used in xact block

* Fixes a crash that occurs when distributing table after dropping foreign key

### citus v9.5.2 (January 26, 2021) ###

* Fixes distributed deadlock detection being blocked by metadata sync

* Prevents segfaults when SAVEPOINT handling cannot recover from connection
  failures

* Fixes possible issues that might occur with single shard distributed tables

### citus v9.5.1 (December 1, 2020) ###

* Enables PostgreSQL's parallel queries on EXPLAIN ANALYZE

* Fixes a bug that could cause excessive memory consumption when a partition is
  created

* Fixes a bug that triggers subplan executions unnecessarily with cursors

* Fixes a segfault in connection management due to invalid connection hash
  entries

### citus v9.5.0 (November 10, 2020) ###

* Adds support for PostgreSQL 13

* Removes the task-tracker executor

* Introduces citus local tables

* Introduces `undistribute_table` UDF to convert tables back to postgres tables

* Adds support for `EXPLAIN (ANALYZE) EXECUTE` and `EXPLAIN EXECUTE`

* Adds support for `EXPLAIN (ANALYZE, WAL)` for PG13

* Sorts the output of `EXPLAIN (ANALYZE)` by execution duration

* Adds support for CREATE TABLE ... USING table_access_method

* Adds support for `WITH TIES` option in SELECT and INSERT SELECT queries

* Avoids taking multi-shard locks on workers

* Enforces `citus.max_shared_pool_size` config in COPY queries

* Enables custom aggregates with multiple parameters to be executed on workers

* Enforces `citus.max_intermediate_result_size` in local execution

* Improves cost estimation of INSERT SELECT plans

* Introduces delegation of procedures that read from reference tables

* Prevents pull-push execution for simple pushdownable subqueries

* Improves error message when creating a foreign key to a local table

* Makes `citus_prepare_pg_upgrade` idempotent by dropping transition tables

* Disallows `ON TRUE` outer joins with reference & distributed tables when
  reference table is outer relation to avoid incorrect results

* Disallows field indirection in INSERT/UPDATE queries to avoid incorrect
  results

* Disallows volatile functions in UPDATE subqueries to avoid incorrect results

* Fixes CREATE INDEX CONCURRENTLY crash with local execution

* Fixes `citus_finish_pg_upgrade` to drop all backup tables

* Fixes a bug that caused failures when a `RECURSIVE VIEW` joined a reference
  table

* Fixes DROP SEQUENCE failures when metadata syncing is enabled

* Fixes a bug that caused CREATE TABLE with CHECK constraint to fail

* Fixes a bug that could cause VACUUM to deadlock

* Fixes master_update_node failure when no background worker slots are available

* Fixes a bug that caused replica identity to not be propagated on shard repair

* Fixes a bug that could cause crashes after connection timeouts

* Fixes a bug that could cause crashes with certain compile flags

* Fixes a bug that could cause deadlocks on CREATE INDEX

* Fixes a bug with genetic query optimization in outer joins

* Fixes a crash when aggregating empty tables

* Fixes a crash with inserting domain constrained composite types

* Fixes a crash with multi-row & router INSERT's in local execution

* Fixes a possibility of doing temporary file cleanup more than once

* Fixes incorrect setting of join related fields

* Fixes memory issues around deparsing index commands

* Fixes reference table access tracking for sequential execution

* Fixes removal of a single node with only reference tables

* Fixes sending commands to coordinator when it is added as a worker

* Fixes write queries with const expressions and COLLATE in various places

* Fixes wrong cancellation message about distributed deadlock

### citus v9.4.2 (October 21, 2020) ###

* Fixes a bug that could lead to multiple maintenance daemons

ci/README.md

@@ -46,6 +46,16 @@ following:
requires also adding a comment before explaining why this specific use of the
function is safe.

## `build-citus.sh`

This is the script used during the build phase of the extension. Historically this
script was embedded in the docker images, which made maintenance a hassle; now it
lives in tree with the rest of the source code.

When this script fails, you most likely have a build error on the postgres version it
was building at the time of the failure. Fix the compile error and push a new version
of your code to fix it.
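To reproduce the CI build step by hand — a minimal sketch, assuming you run it from the repository root inside one of the `citus/extbuilder` images (the `PG_MAJOR` value is whichever postgres major version you want to build against):

```bash
# The script reads PG_MAJOR from the environment and errors out if it is unset;
# it configures with CFLAGS=-Werror against that version's pg_config and packages
# the resulting install tree into install-${PG_MAJOR}.tar.
PG_MAJOR=13 ci/build-citus.sh
```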

## `check_enterprise_merge.sh`

This check exists to make sure that we can always merge the `master` branch of
ci/build-citus.sh

@@ -0,0 +1,47 @@
#!/bin/bash

# make bash behave
set -euo pipefail
IFS=$'\n\t'

# shellcheck disable=SC1091
source ci/ci_helpers.sh

# read pg major version, error if not provided
PG_MAJOR=${PG_MAJOR:?please provide the postgres major version}

# get codename from release file
. /etc/os-release
codename=${VERSION#*(}
codename=${codename%)*}
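# e.g. Debian 10 ships VERSION="10 (buster)" in /etc/os-release; the two
# expansions above strip everything up to "(" and from ")" on, leaving
# codename="buster"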

# get the project name from the environment
project="${CIRCLE_PROJECT_REPONAME}"

# we'll do everything with absolute paths
basedir="$(pwd)"

# clear out the git repo (reduce workspace size)
rm -rf "${basedir}/.git"

build_ext() {
  pg_major="$1"

  builddir="${basedir}/build-${pg_major}"
  echo "Beginning build of ${project} for PostgreSQL ${pg_major}..." >&2

  # do everything in a subdirectory to avoid clutter in current directory
  mkdir -p "${builddir}" && cd "${builddir}"

  CFLAGS=-Werror "${basedir}/configure" PG_CONFIG="/usr/lib/postgresql/${pg_major}/bin/pg_config" --enable-coverage

  installdir="${builddir}/install"
  make -j$(nproc) && mkdir -p "${installdir}" && { make DESTDIR="${installdir}" install-all || make DESTDIR="${installdir}" install ; }

  cd "${installdir}" && find . -type f -print > "${builddir}/files.lst"
  tar cvf "${basedir}/install-${pg_major}.tar" `cat ${builddir}/files.lst`

  cd "${builddir}" && rm -rf install files.lst && make clean
}

build_ext "${PG_MAJOR}"

configure

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for Citus 9.5devel.
# Generated by GNU Autoconf 2.69 for Citus 9.5.4.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.

@@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='Citus'
PACKAGE_TARNAME='citus'
PACKAGE_VERSION='9.5devel'
PACKAGE_STRING='Citus 9.5devel'
PACKAGE_VERSION='9.5.4'
PACKAGE_STRING='Citus 9.5.4'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''

@@ -664,6 +664,7 @@ infodir
docdir
oldincludedir
includedir
runstatedir
localstatedir
sharedstatedir
sysconfdir

@@ -740,6 +741,7 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'

@@ -992,6 +994,15 @@ do
  | -silent | --silent | --silen | --sile | --sil)
    silent=yes ;;

  -runstatedir | --runstatedir | --runstatedi | --runstated \
  | --runstate | --runstat | --runsta | --runst | --runs \
  | --run | --ru | --r)
    ac_prev=runstatedir ;;
  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
  | --run=* | --ru=* | --r=*)
    runstatedir=$ac_optarg ;;

  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
    ac_prev=sbindir ;;
  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \

@@ -1129,7 +1140,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
		datadir sysconfdir sharedstatedir localstatedir includedir \
		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
		libdir localedir mandir
		libdir localedir mandir runstatedir
do
  eval ac_val=\$$ac_var
  # Remove trailing slashes.

@@ -1242,7 +1253,7 @@ if test "$ac_init_help" = "long"; then
  # Omit some internal or obsolete options to make the list less imposing.
  # This message is too long to be a string in the A/UX 3.1 sh.
  cat <<_ACEOF
\`configure' configures Citus 9.5devel to adapt to many kinds of systems.
\`configure' configures Citus 9.5.4 to adapt to many kinds of systems.

Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1282,6 +1293,7 @@ Fine tuning of the installation directories:
  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
  --libdir=DIR            object code libraries [EPREFIX/lib]
  --includedir=DIR        C header files [PREFIX/include]
  --oldincludedir=DIR     C header files for non-gcc [/usr/include]

@@ -1303,7 +1315,7 @@ fi

if test -n "$ac_init_help"; then
  case $ac_init_help in
    short | recursive ) echo "Configuration of Citus 9.5devel:";;
    short | recursive ) echo "Configuration of Citus 9.5.4:";;
  esac
  cat <<\_ACEOF

@@ -1403,7 +1415,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
  cat <<\_ACEOF
Citus configure 9.5devel
Citus configure 9.5.4
generated by GNU Autoconf 2.69

Copyright (C) 2012 Free Software Foundation, Inc.

@@ -1886,7 +1898,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.

It was created by Citus $as_me 9.5devel, which was
It was created by Citus $as_me 9.5.4, which was
generated by GNU Autoconf 2.69. Invocation command line was

  $ $0 $@

@@ -5055,7 +5067,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by Citus $as_me 9.5devel, which was
This file was extended by Citus $as_me 9.5.4, which was
generated by GNU Autoconf 2.69. Invocation command line was

  CONFIG_FILES = $CONFIG_FILES

@@ -5117,7 +5129,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
Citus config.status 9.5devel
Citus config.status 9.5.4
configured by $0, generated by GNU Autoconf 2.69,
  with options \\"\$ac_cs_config\\"

configure.ac

@@ -5,7 +5,7 @@
# everyone needing autoconf installed, the resulting files are checked
# into the SCM.

AC_INIT([Citus], [9.5devel])
AC_INIT([Citus], [9.5.4])
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])

# we'll need sed and awk for some of the version commands
@@ -569,7 +569,7 @@ ExecuteAndLogDDLCommand(const char *commandString)
    ereport(DEBUG4, (errmsg("executing \"%s\"", commandString)));

    Node *parseTree = ParseTreeNode(commandString);
    CitusProcessUtility(parseTree, commandString, PROCESS_UTILITY_TOPLEVEL,
    CitusProcessUtility(parseTree, commandString, PROCESS_UTILITY_QUERY,
                        NULL, None_Receiver, NULL);
}
@@ -1658,7 +1658,7 @@ UndistributeTable(Oid relationId)
        Node *parseTree = ParseTreeNode(tableCreationCommand);

        RelayEventExtendNames(parseTree, schemaName, hashOfName);
        CitusProcessUtility(parseTree, tableCreationCommand, PROCESS_UTILITY_TOPLEVEL,
        CitusProcessUtility(parseTree, tableCreationCommand, PROCESS_UTILITY_QUERY,
                            NULL, None_Receiver, NULL);
    }
@@ -64,6 +64,8 @@ static void ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple,
                                          Var *referencedDistColumn,
                                          int *referencingAttrIndex,
                                          int *referencedAttrIndex);
static List * GetForeignKeyIdsForColumn(char *columnName, Oid relationId,
                                        int searchForeignKeyColumnFlags);
static List * GetForeignConstraintCommandsInternal(Oid relationId, int flags);
static Oid get_relation_constraint_oid_compat(HeapTuple heapTuple);
static List * GetForeignKeyOidsToCitusLocalTables(Oid relationId);
@@ -483,6 +485,21 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple,
}


/*
 * ColumnAppearsInForeignKey returns true if there is a foreign key constraint
 * from/to given column. False otherwise.
 */
bool
ColumnAppearsInForeignKey(char *columnName, Oid relationId)
{
    int searchForeignKeyColumnFlags = SEARCH_REFERENCING_RELATION |
                                      SEARCH_REFERENCED_RELATION;
    List *foreignKeysColumnAppeared =
        GetForeignKeyIdsForColumn(columnName, relationId, searchForeignKeyColumnFlags);
    return list_length(foreignKeysColumnAppeared) > 0;
}


/*
 * ColumnAppearsInForeignKeyToReferenceTable checks if there is a foreign key
 * constraint from/to any reference table on the given column.
@@ -490,9 +507,45 @@ ForeignConstraintFindDistKeys(HeapTuple pgConstraintTuple,
bool
ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId)
{
    int searchForeignKeyColumnFlags = SEARCH_REFERENCING_RELATION |
                                      SEARCH_REFERENCED_RELATION;
    List *foreignKeyIdsColumnAppeared =
        GetForeignKeyIdsForColumn(columnName, relationId, searchForeignKeyColumnFlags);

    Oid foreignKeyId = InvalidOid;
    foreach_oid(foreignKeyId, foreignKeyIdsColumnAppeared)
    {
        Oid referencedTableId = GetReferencedTableId(foreignKeyId);
        if (IsCitusTableType(referencedTableId, REFERENCE_TABLE))
        {
            return true;
        }
    }

    return false;
}


/*
 * GetForeignKeyIdsForColumn takes columnName and relationId for the owning
 * relation, and returns a list of OIDs for foreign constraints that the column
 * with columnName is involved in, according to the "searchForeignKeyColumnFlags"
 * argument. See the SearchForeignKeyColumnFlags enum definition for usage.
 */
static List *
GetForeignKeyIdsForColumn(char *columnName, Oid relationId,
                          int searchForeignKeyColumnFlags)
{
    bool searchReferencing = searchForeignKeyColumnFlags & SEARCH_REFERENCING_RELATION;
    bool searchReferenced = searchForeignKeyColumnFlags & SEARCH_REFERENCED_RELATION;

    /* at least one of them should be true */
    Assert(searchReferencing || searchReferenced);

    List *foreignKeyIdsColumnAppeared = NIL;

    ScanKeyData scanKey[1];
    int scanKeyCount = 1;
    bool foreignKeyToReferenceTableIncludesGivenColumn = false;

    Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock);
@@ -511,11 +564,11 @@ ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId)
        Oid referencedTableId = constraintForm->confrelid;
        Oid referencingTableId = constraintForm->conrelid;

        if (referencedTableId == relationId)
        if (referencedTableId == relationId && searchReferenced)
        {
            pgConstraintKey = Anum_pg_constraint_confkey;
        }
        else if (referencingTableId == relationId)
        else if (referencingTableId == relationId && searchReferencing)
        {
            pgConstraintKey = Anum_pg_constraint_conkey;
        }
@@ -529,22 +582,12 @@ ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId)
            continue;
        }

        /*
         * We check if the referenced table is a reference table. There cannot be
         * any foreign constraint from a distributed table to a local table.
         */
        Assert(IsCitusTable(referencedTableId));
        if (!IsCitusTableType(referencedTableId, REFERENCE_TABLE))
        {
            heapTuple = systable_getnext(scanDescriptor);
            continue;
        }

        if (HeapTupleOfForeignConstraintIncludesColumn(heapTuple, relationId,
                                                       pgConstraintKey, columnName))
        {
            foreignKeyToReferenceTableIncludesGivenColumn = true;
            break;
            Oid foreignKeyOid = get_relation_constraint_oid_compat(heapTuple);
            foreignKeyIdsColumnAppeared = lappend_oid(foreignKeyIdsColumnAppeared,
                                                      foreignKeyOid);
        }

        heapTuple = systable_getnext(scanDescriptor);
@@ -554,7 +597,7 @@ ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId)
    systable_endscan(scanDescriptor);
    table_close(pgConstraint, NoLock);

    return foreignKeyToReferenceTableIncludesGivenColumn;
    return foreignKeyIdsColumnAppeared;
}
@@ -773,31 +816,70 @@ TableReferencing(Oid relationId)

/*
 * ConstraintIsAForeignKey is a wrapper around GetForeignKeyOidByName that
 * returns true if the given constraint name identifies a foreign key
 * constraint defined on relation with relationId.
 * ConstraintIsAUniquenessConstraint is a wrapper around ConstraintWithNameIsOfType
 * that returns true if the given constraint name identifies a uniqueness
 * constraint, i.e.:
 * - primary key constraint, or
 * - unique constraint
 */
bool
ConstraintIsAForeignKey(char *inputConstaintName, Oid relationId)
ConstraintIsAUniquenessConstraint(char *inputConstaintName, Oid relationId)
{
    Oid foreignKeyId = GetForeignKeyOidByName(inputConstaintName, relationId);
    return OidIsValid(foreignKeyId);
    bool isUniqueConstraint = ConstraintWithNameIsOfType(inputConstaintName, relationId,
                                                         CONSTRAINT_UNIQUE);
    bool isPrimaryConstraint = ConstraintWithNameIsOfType(inputConstaintName, relationId,
                                                          CONSTRAINT_PRIMARY);
    return isUniqueConstraint || isPrimaryConstraint;
}


/*
 * GetForeignKeyOidByName returns the OID of the foreign key with the given name
 * defined on relation with relationId. If there is no such foreign key
 * constraint, then this function returns InvalidOid.
 * ConstraintIsAForeignKey is a wrapper around ConstraintWithNameIsOfType that
 * returns true if the given constraint name identifies a foreign key constraint.
 */
Oid
GetForeignKeyOidByName(char *inputConstaintName, Oid relationId)
bool
ConstraintIsAForeignKey(char *inputConstaintName, Oid relationId)
{
    int flags = INCLUDE_REFERENCING_CONSTRAINTS;
    List *foreignKeyOids = GetForeignKeyOids(relationId, flags);
    return ConstraintWithNameIsOfType(inputConstaintName, relationId, CONSTRAINT_FOREIGN);
}

    Oid foreignKeyId = FindForeignKeyOidWithName(foreignKeyOids, inputConstaintName);
    return foreignKeyId;

/*
 * ConstraintWithNameIsOfType is a wrapper around get_relation_constraint_oid that
 * returns true if the given constraint name identifies a valid constraint defined
 * on relation with relationId and its type matches the input constraint type.
 */
bool
ConstraintWithNameIsOfType(char *inputConstaintName, Oid relationId,
                           char targetConstraintType)
{
    bool missingOk = true;
    Oid constraintId =
        get_relation_constraint_oid(relationId, inputConstaintName, missingOk);
    return ConstraintWithIdIsOfType(constraintId, targetConstraintType);
}


/*
 * ConstraintWithIdIsOfType returns true if constraint with constraintId exists
 * and is of type targetConstraintType.
 */
bool
ConstraintWithIdIsOfType(Oid constraintId, char targetConstraintType)
{
    HeapTuple heapTuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constraintId));
    if (!HeapTupleIsValid(heapTuple))
    {
        /* no such constraint */
        return false;
    }

    Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
    char constraintType = constraintForm->contype;
    bool constraintTypeMatches = (constraintType == targetConstraintType);

    ReleaseSysCache(heapTuple);

    return constraintTypeMatches;
}
@@ -1138,12 +1138,16 @@ TriggerSyncMetadataToPrimaryNodes(void)

            triggerMetadataSync = true;
        }
        else if (!workerNode->metadataSynced)
        {
            triggerMetadataSync = true;
        }
    }

    /* let the maintenance daemon know about the metadata sync */
    if (triggerMetadataSync)
    {
        TriggerMetadataSync(MyDatabaseId);
        TriggerMetadataSyncOnCommit();
    }
}
@@ -511,6 +511,11 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand)

        ErrorIfUnsupportedDropIndexStmt(dropIndexStatement);

        if (AnyForeignKeyDependsOnIndex(distributedIndexId))
        {
            MarkInvalidateForeignKeyGraph();
        }

        ddlJob->targetRelationId = distributedRelationId;
        ddlJob->concurrentIndexCmd = dropIndexStatement->concurrent;
@@ -261,7 +261,8 @@ static CopyShardState * GetShardState(uint64 shardId, HTAB *shardStateHash,
                                      copyOutState, bool isCopyToIntermediateFile);
static MultiConnection * CopyGetPlacementConnection(HTAB *connectionStateHash,
                                                    ShardPlacement *placement,
                                                    bool stopOnFailure);
                                                    bool stopOnFailure,
                                                    bool colocatedIntermediateResult);
static bool HasReachedAdaptiveExecutorPoolSize(List *connectionStateHash);
static MultiConnection * GetLeastUtilisedCopyConnection(List *connectionStateList,
                                                        char *nodeName, int nodePort);
@@ -2253,8 +2254,9 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,

    /* define the template for the COPY statement that is sent to workers */
    CopyStmt *copyStatement = makeNode(CopyStmt);

    if (copyDest->intermediateResultIdPrefix != NULL)
    bool colocatedIntermediateResults =
        copyDest->intermediateResultIdPrefix != NULL;
    if (colocatedIntermediateResults)
    {
        copyStatement->relation = makeRangeVar(NULL, copyDest->intermediateResultIdPrefix,
                                               -1);
@@ -2291,13 +2293,21 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
        RecordRelationAccessIfNonDistTable(tableId, PLACEMENT_ACCESS_DML);

        /*
         * For all the primary (e.g., writable) nodes, reserve a shared connection.
         * We do this upfront because we cannot know which nodes are going to be
         * accessed. Since the order of the reservation is important, we need to
         * do it right here. For the details on why the order is important, see
         * the function.
         * Colocated intermediate results do not honor citus.max_shared_pool_size,
         * so we don't need to reserve any connections. Each result file is sent
         * over a single connection.
         */
        EnsureConnectionPossibilityForPrimaryNodes();
        if (!colocatedIntermediateResults)
        {
            /*
             * For all the primary (e.g., writable) nodes, reserve a shared connection.
             * We do this upfront because we cannot know which nodes are going to be
             * accessed. Since the order of the reservation is important, we need to
             * do it right here. For the details on why the order is important, see
             * the function.
             */
            EnsureConnectionPossibilityForPrimaryNodes();
        }
    }
@@ -3438,7 +3448,8 @@ InitializeCopyShardState(CopyShardState *shardState,
    }

    MultiConnection *connection =
        CopyGetPlacementConnection(connectionStateHash, placement, stopOnFailure);
        CopyGetPlacementConnection(connectionStateHash, placement, stopOnFailure,
                                   isCopyToIntermediateFile);
    if (connection == NULL)
    {
        failedPlacementCount++;
@@ -3534,11 +3545,40 @@ LogLocalCopyExecution(uint64 shardId)
 * then it reuses the connection. Otherwise, it requests a connection for placement.
 */
static MultiConnection *
CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement, bool
                           stopOnFailure)
CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement,
                           bool stopOnFailure, bool colocatedIntermediateResult)
{
    uint32 connectionFlags = FOR_DML;
    char *nodeUser = CurrentUserName();
    if (colocatedIntermediateResult)
    {
        /*
         * Colocated intermediate results are just files and not required to use
         * the same connections with their co-located shards. So, we are free to
         * use any connection we can get.
         *
         * Also, the current connection re-use logic does not know how to handle
         * intermediate results, as the intermediate results always truncate the
         * existing files. That's why we use one connection per intermediate
         * result.
         *
         * Also note that we are breaking the guarantees of citus.shared_pool_size
         * as we cannot rely on optional connections.
         */
        uint32 connectionFlagsForIntermediateResult = 0;
        MultiConnection *connection =
            GetNodeConnection(connectionFlagsForIntermediateResult, placement->nodeName,
                              placement->nodePort);

        /*
         * As noted above, we want each intermediate file to go over
         * a separate connection.
         */
        ClaimConnectionExclusively(connection);

        /* and, we cannot afford to handle failures when anything goes wrong */
        MarkRemoteTransactionCritical(connection);

        return connection;
    }

    /*
     * Determine whether the task has to be assigned to a particular connection
@@ -3546,6 +3586,7 @@ CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement,
     */
    ShardPlacementAccess *placementAccess = CreatePlacementAccess(placement,
                                                                  PLACEMENT_ACCESS_DML);
    uint32 connectionFlags = FOR_DML;
    MultiConnection *connection =
        GetConnectionIfPlacementAccessedInXact(connectionFlags,
                                               list_make1(placementAccess), NULL);
@@ -3583,6 +3624,12 @@ CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement,
                                                 nodeName,
                                                 nodePort);

        /*
         * Make sure that the connection management remembers that Citus
         * accesses this placement over the connection.
         */
        AssignPlacementListToConnection(list_make1(placementAccess), connection);

        return connection;
    }
@@ -3628,6 +3675,7 @@ CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement,
        connectionFlags |= REQUIRE_CLEAN_CONNECTION;
    }

    char *nodeUser = CurrentUserName();
    connection = GetPlacementConnection(connectionFlags, placement, nodeUser);
    if (connection == NULL)
    {
@@ -3643,6 +3691,12 @@ CopyGetPlacementConnection(HTAB *connectionStateHash, ShardPlacement *placement,
            connection =
                GetLeastUtilisedCopyConnection(copyConnectionStateList, nodeName,
                                               nodePort);

            /*
             * Make sure that the connection management remembers that Citus
             * accesses this placement over the connection.
             */
            AssignPlacementListToConnection(list_make1(placementAccess), connection);
        }
        else
        {
@@ -17,6 +17,8 @@
#include "access/xact.h"
#include "catalog/index.h"
#include "catalog/pg_class.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_depend.h"
#include "commands/tablecmds.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/colocation_utils.h"
@@ -27,6 +29,7 @@
#include "distributed/listutils.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/metadata_sync.h"
#include "distributed/metadata/dependency.h"
#include "distributed/multi_executor.h"
#include "distributed/multi_partitioning_utils.h"
#include "distributed/reference_table_utils.h"
@ -49,6 +52,7 @@ static void ErrorIfAlterTableDefinesFKeyFromPostgresToCitusLocalTable(
|
|||
static List * GetAlterTableStmtFKeyConstraintList(AlterTableStmt *alterTableStatement);
|
||||
static List * GetAlterTableCommandFKeyConstraintList(AlterTableCmd *command);
|
||||
static bool AlterTableCommandTypeIsTrigger(AlterTableType alterTableType);
|
||||
static bool AlterTableDropsForeignKey(AlterTableStmt *alterTableStatement);
|
||||
static void ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement);
|
||||
static void ErrorIfCitusLocalTablePartitionCommand(AlterTableCmd *alterTableCmd,
|
||||
Oid parentRelationId);
|
||||
|
@ -384,6 +388,18 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand)
|
|||
*/
|
||||
ErrorIfAlterTableDefinesFKeyFromPostgresToCitusLocalTable(alterTableStatement);
|
||||
|
||||
if (AlterTableDropsForeignKey(alterTableStatement))
|
||||
{
|
||||
/*
|
||||
* The foreign key graph keeps track of the foreign keys including local tables.
|
||||
* So, even if a foreign key on a local table is dropped, we should invalidate
|
||||
* the graph so that the next commands can see the graph up-to-date.
|
||||
* We are aware that utility hook would still invalidate foreign key graph
|
||||
* even when command fails, but currently we are ok with that.
|
||||
*/
|
||||
MarkInvalidateForeignKeyGraph();
|
||||
}
|
||||
|
||||
bool referencingIsLocalTable = !IsCitusTable(leftRelationId);
|
||||
if (referencingIsLocalTable)
|
||||
{
|
||||
|
@ -461,7 +477,9 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand)
|
|||
*/
|
||||
Assert(list_length(commandList) == 1);
|
||||
|
||||
Oid foreignKeyId = GetForeignKeyOidByName(constraintName, leftRelationId);
|
||||
bool missingOk = false;
|
||||
Oid foreignKeyId = get_relation_constraint_oid(leftRelationId,
|
||||
constraintName, missingOk);
|
||||
rightRelationId = GetReferencedTableId(foreignKeyId);
|
||||
}
|
||||
}
|
||||
|
@ -714,6 +732,99 @@ AlterTableCommandTypeIsTrigger(AlterTableType alterTableType)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* AlterTableDropsForeignKey returns true if the given AlterTableStmt drops
|
||||
* a foreign key. False otherwise.
|
||||
*/
|
||||
static bool
|
||||
AlterTableDropsForeignKey(AlterTableStmt *alterTableStatement)
|
||||
{
|
||||
LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
|
||||
Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode);
|
||||
|
||||
AlterTableCmd *command = NULL;
|
||||
foreach_ptr(command, alterTableStatement->cmds)
|
||||
{
|
||||
AlterTableType alterTableType = command->subtype;
|
||||
|
||||
if (alterTableType == AT_DropColumn)
|
||||
{
|
||||
char *columnName = command->name;
|
||||
if (ColumnAppearsInForeignKey(columnName, relationId))
|
||||
{
|
||||
/* dropping a column in the either side of the fkey will drop the fkey */
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* In order to drop the foreign key, other than DROP COLUMN, the command must be
|
||||
* DROP CONSTRAINT command.
|
||||
*/
|
||||
if (alterTableType != AT_DropConstraint)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
char *constraintName = command->name;
|
||||
if (ConstraintIsAForeignKey(constraintName, relationId))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
else if (ConstraintIsAUniquenessConstraint(constraintName, relationId))
|
||||
{
|
||||
/*
|
||||
* If the uniqueness constraint of the column that the foreign key depends on
|
||||
* is getting dropped, then the foreign key will also be dropped.
|
||||
*/
|
||||
bool missingOk = false;
|
||||
Oid uniquenessConstraintId =
|
||||
get_relation_constraint_oid(relationId, constraintName, missingOk);
|
||||
Oid indexId = get_constraint_index(uniquenessConstraintId);
|
||||
if (AnyForeignKeyDependsOnIndex(indexId))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* AnyForeignKeyDependsOnIndex scans pg_depend and returns true if given index
|
||||
* is valid and any foreign key depends on it.
|
||||
*/
|
||||
bool
|
||||
AnyForeignKeyDependsOnIndex(Oid indexId)
|
||||
{
|
||||
Oid dependentObjectClassId = RelationRelationId;
|
||||
Oid dependentObjectId = indexId;
|
||||
List *dependencyTupleList =
|
||||
GetPgDependTuplesForDependingObjects(dependentObjectClassId, dependentObjectId);
|
||||
|
||||
HeapTuple dependencyTuple = NULL;
|
||||
foreach_ptr(dependencyTuple, dependencyTupleList)
|
||||
{
|
||||
Form_pg_depend dependencyForm = (Form_pg_depend) GETSTRUCT(dependencyTuple);
|
||||
Oid dependingClassId = dependencyForm->classid;
|
||||
if (dependingClassId != ConstraintRelationId)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
Oid dependingObjectId = dependencyForm->objid;
|
||||
if (ConstraintWithIdIsOfType(dependingObjectId, CONSTRAINT_FOREIGN))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* PreprocessAlterTableStmt issues a warning.
|
||||
* ALTER TABLE ALL IN TABLESPACE statements have their node type as
|
||||
|
@ -1339,21 +1450,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
|
|||
break;
|
||||
}
|
||||
|
||||
case AT_DropConstraint:
|
||||
{
|
||||
if (!OidIsValid(relationId))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (ConstraintIsAForeignKey(command->name, relationId))
|
||||
{
|
||||
MarkInvalidateForeignKeyGraph();
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case AT_EnableTrig:
|
||||
case AT_EnableAlwaysTrig:
|
||||
case AT_EnableReplicaTrig:
|
||||
|
@ -1383,6 +1479,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
|
|||
case AT_SetNotNull:
|
||||
case AT_ReplicaIdentity:
|
||||
case AT_ValidateConstraint:
|
||||
case AT_DropConstraint: /* we do the check for invalidation in AlterTableDropsForeignKey */
|
||||
{
|
||||
/*
|
||||
* We will not perform any special check for:
|
||||
|
|
|
@@ -161,6 +161,12 @@ AfterXactConnectionHandling(bool isCommit)
    hash_seq_init(&status, ConnectionHash);
    while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0)
    {
        if (!entry->isValid)
        {
            /* skip invalid connection hash entries */
            continue;
        }

        AfterXactHostConnectionHandling(entry, isCommit);

        /*

@@ -289,11 +295,24 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
     */

    ConnectionHashEntry *entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found);
    if (!found)
    if (!found || !entry->isValid)
    {
        /*
         * We are just building the hash entry, or previously it was left in an
         * invalid state as we couldn't allocate memory for it.
         * So initialize entry->connections list here.
         */
        entry->isValid = false;
        entry->connections = MemoryContextAlloc(ConnectionContext,
                                                sizeof(dlist_head));
        dlist_init(entry->connections);

        /*
         * If MemoryContextAlloc errors out (e.g. during an OOM), entry->connections
         * stays as NULL. So entry->isValid should be set to true right after we
         * initialize entry->connections properly.
         */
        entry->isValid = true;
    }

    /* if desired, check whether there's a usable connection */

@@ -449,6 +468,12 @@ CloseAllConnectionsAfterTransaction(void)
    hash_seq_init(&status, ConnectionHash);
    while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0)
    {
        if (!entry->isValid)
        {
            /* skip invalid connection hash entries */
            continue;
        }

        dlist_iter iter;

        dlist_head *connections = entry->connections;

@@ -483,7 +508,7 @@ ConnectionAvailableToNode(char *hostName, int nodePort, const char *userName,
    ConnectionHashEntry *entry =
        (ConnectionHashEntry *) hash_search(ConnectionHash, &key, HASH_FIND, &found);

    if (!found)
    if (!found || !entry->isValid)
    {
        return false;
    }

@@ -509,6 +534,12 @@ CloseNodeConnectionsAfterTransaction(char *nodeName, int nodePort)
    hash_seq_init(&status, ConnectionHash);
    while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0)
    {
        if (!entry->isValid)
        {
            /* skip invalid connection hash entries */
            continue;
        }

        dlist_iter iter;

        if (strcmp(entry->key.hostname, nodeName) != 0 || entry->key.port != nodePort)

@@ -584,6 +615,12 @@ ShutdownAllConnections(void)
    hash_seq_init(&status, ConnectionHash);
    while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != NULL)
    {
        if (!entry->isValid)
        {
            /* skip invalid connection hash entries */
            continue;
        }

        dlist_iter iter;

        dlist_foreach(iter, entry->connections)

@@ -1194,6 +1231,12 @@ FreeConnParamsHashEntryFields(ConnParamsHashEntry *entry)
static void
AfterXactHostConnectionHandling(ConnectionHashEntry *entry, bool isCommit)
{
    if (!entry || !entry->isValid)
    {
        /* callers only pass valid hash entries but let's be on the safe side */
        ereport(ERROR, (errmsg("connection hash entry is NULL or invalid")));
    }

    dlist_mutable_iter iter;
    int cachedConnectionCount = 0;

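The hunks above all rely on the same two-phase validity idea: mark the entry invalid before any allocation that can throw, and flip it to valid only once every field is initialized. A minimal sketch of that pattern, assuming a hypothetical MyEntry struct and MyHash table rather than the actual Citus types:

/*
 * Sketch: guarding a dynahash entry against partial initialization.
 * hash_search(HASH_ENTER) may return a brand-new entry whose fields are
 * garbage, and the allocation afterwards can error out (e.g. OOM) midway;
 * the flag lets later lookups skip or re-initialize such entries instead
 * of dereferencing a NULL pointer.
 */
bool found = false;
MyEntry *entry = hash_search(MyHash, &key, HASH_ENTER, &found);
if (!found || !entry->isValid)
{
    entry->isValid = false;          /* not usable until fully built */
    entry->items = MemoryContextAlloc(TopMemoryContext, sizeof(dlist_head));
    dlist_init(entry->items);
    entry->isValid = true;           /* safe to use from now on */
}
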
@@ -660,6 +660,16 @@ static void SetAttributeInputMetadata(DistributedExecution *execution,
void
AdaptiveExecutorPreExecutorRun(CitusScanState *scanState)
{
    if (scanState->finishedPreScan)
    {
        /*
         * Cursors (and hence RETURN QUERY syntax in pl/pgsql functions)
         * may trigger AdaptiveExecutorPreExecutorRun() on every fetch
         * operation. Though, we should only execute PreScan once.
         */
        return;
    }

    DistributedPlan *distributedPlan = scanState->distributedPlan;

    /*

@@ -670,6 +680,8 @@ AdaptiveExecutorPreExecutorRun(CitusScanState *scanState)
    LockPartitionsForDistributedPlan(distributedPlan);

    ExecuteSubPlans(distributedPlan);

    scanState->finishedPreScan = true;
}


@@ -3285,6 +3297,25 @@ TransactionStateMachine(WorkerSession *session)
            case REMOTE_TRANS_SENT_COMMAND:
            {
                TaskPlacementExecution *placementExecution = session->currentTask;
                if (placementExecution == NULL)
                {
                    /*
                     * We have seen cases in production where placementExecution
                     * could inadvertently be left unset. Investigation documented on
                     * https://github.com/citusdata/citus-enterprise/issues/493
                     * (due to sensitive data in the initial report it is not discussed
                     * in our community repository)
                     *
                     * Currently we don't have a reliable way of reproducing this issue.
                     * Erroring here seems to be a more desirable approach compared to a
                     * SEGFAULT on the dereference of placementExecution, with a possible
                     * crash recovery as a result.
                     */
                    ereport(ERROR, (errmsg(
                                        "unable to recover from inconsistent state in "
                                        "the connection state machine on coordinator")));
                }

                ShardCommandExecution *shardCommandExecution =
                    placementExecution->shardCommandExecution;
                Task *task = shardCommandExecution->task;

@@ -558,6 +558,9 @@ AdaptiveExecutorCreateScan(CustomScan *scan)
    scanState->customScanState.methods = &AdaptiveExecutorCustomExecMethods;
    scanState->PreExecScan = &CitusPreExecScan;

    scanState->finishedPreScan = false;
    scanState->finishedRemoteScan = false;

    return (Node *) scanState;
}


@@ -578,6 +581,9 @@ NonPushableInsertSelectCreateScan(CustomScan *scan)
    scanState->customScanState.methods =
        &NonPushableInsertSelectCustomExecMethods;

    scanState->finishedPreScan = false;
    scanState->finishedRemoteScan = false;

    return (Node *) scanState;
}

@@ -409,7 +409,8 @@ LocallyExecuteUtilityTask(const char *localTaskQueryCommand)
         * process utility.
         */
        CitusProcessUtility(localTaskRawParseTree, localTaskQueryCommand,
                            PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
                            PROCESS_UTILITY_QUERY, NULL, None_Receiver,
                            NULL);
    }
}
}

@@ -1033,25 +1033,13 @@ BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap)
    node->remainingDependencyCount = 0;
    node->dependingNodes = NIL;

    ObjectAddress target = { 0 };
    ObjectAddressSet(target, RelationRelationId, relationId);
    Oid targetObjectClassId = RelationRelationId;
    Oid targetObjectId = relationId;
    List *dependencyTupleList = GetPgDependTuplesForDependingObjects(targetObjectClassId,
                                                                     targetObjectId);

    ScanKeyData key[2];
    HeapTuple depTup = NULL;

    /*
     * iterate the actual pg_depend catalog
     */
    Relation depRel = table_open(DependRelationId, AccessShareLock);

    ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(target.classId));
    ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(target.objectId));
    SysScanDesc depScan = systable_beginscan(depRel, DependReferenceIndexId,
                                             true, NULL, 2, key);

    while (HeapTupleIsValid(depTup = systable_getnext(depScan)))
    foreach_ptr(depTup, dependencyTupleList)
    {
        Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);


@@ -1066,13 +1054,48 @@ BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap)
        }
    }

    systable_endscan(depScan);
    relation_close(depRel, AccessShareLock);

    return node;
}


/*
 * GetPgDependTuplesForDependingObjects scans pg_depend for given object and
 * returns a list of heap tuples for the objects depending on it.
 */
List *
GetPgDependTuplesForDependingObjects(Oid targetObjectClassId, Oid targetObjectId)
{
    List *dependencyTupleList = NIL;

    Relation pgDepend = table_open(DependRelationId, AccessShareLock);

    ScanKeyData key[2];
    int scanKeyCount = 2;

    ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(targetObjectClassId));
    ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(targetObjectId));

    bool useIndex = true;
    SysScanDesc depScan = systable_beginscan(pgDepend, DependReferenceIndexId,
                                             useIndex, NULL, scanKeyCount, key);

    HeapTuple dependencyTuple = NULL;
    while (HeapTupleIsValid(dependencyTuple = systable_getnext(depScan)))
    {
        /* copy the tuple first */
        dependencyTuple = heap_copytuple(dependencyTuple);
        dependencyTupleList = lappend(dependencyTupleList, dependencyTuple);
    }

    systable_endscan(depScan);
    relation_close(pgDepend, AccessShareLock);

    return dependencyTupleList;
}


/*
 * GetDependingViews takes a relation id, finds the views that depend on the relation
 * and returns list of the oids of those views. It recurses on the pg_depend table to

@@ -254,8 +254,8 @@ static void InvalidateCitusTableCacheEntrySlot(CitusTableCacheEntrySlot *cacheSl
static void InvalidateDistTableCache(void);
static void InvalidateDistObjectCache(void);
static void InitializeTableCacheEntry(int64 shardId);
static bool IsCitusTableTypeInternal(CitusTableCacheEntry *tableEntry, CitusTableType
                                     tableType);
static bool IsCitusTableTypeInternal(char partitionMethod, char replicationModel,
                                     CitusTableType tableType);
static bool RefreshTableCacheEntryIfInvalid(ShardIdCacheEntry *shardEntry);


@@ -309,7 +309,7 @@ IsCitusTableType(Oid relationId, CitusTableType tableType)
    {
        return false;
    }
    return IsCitusTableTypeInternal(tableEntry, tableType);
    return IsCitusTableTypeCacheEntry(tableEntry, tableType);
}


@@ -320,7 +320,8 @@ IsCitusTableType(Oid relationId, CitusTableType tableType)
bool
IsCitusTableTypeCacheEntry(CitusTableCacheEntry *tableEntry, CitusTableType tableType)
{
    return IsCitusTableTypeInternal(tableEntry, tableType);
    return IsCitusTableTypeInternal(tableEntry->partitionMethod,
                                    tableEntry->replicationModel, tableType);
}


@@ -329,47 +330,48 @@ IsCitusTableTypeCacheEntry(CitusTableCacheEntry *tableEntry, CitusTableType tabl
 * the given table type group. For definition of table types, see CitusTableType.
 */
static bool
IsCitusTableTypeInternal(CitusTableCacheEntry *tableEntry, CitusTableType tableType)
IsCitusTableTypeInternal(char partitionMethod, char replicationModel,
                         CitusTableType tableType)
{
    switch (tableType)
    {
        case HASH_DISTRIBUTED:
        {
            return tableEntry->partitionMethod == DISTRIBUTE_BY_HASH;
            return partitionMethod == DISTRIBUTE_BY_HASH;
        }

        case APPEND_DISTRIBUTED:
        {
            return tableEntry->partitionMethod == DISTRIBUTE_BY_APPEND;
            return partitionMethod == DISTRIBUTE_BY_APPEND;
        }

        case RANGE_DISTRIBUTED:
        {
            return tableEntry->partitionMethod == DISTRIBUTE_BY_RANGE;
            return partitionMethod == DISTRIBUTE_BY_RANGE;
        }

        case DISTRIBUTED_TABLE:
        {
            return tableEntry->partitionMethod == DISTRIBUTE_BY_HASH ||
                   tableEntry->partitionMethod == DISTRIBUTE_BY_RANGE ||
                   tableEntry->partitionMethod == DISTRIBUTE_BY_APPEND;
            return partitionMethod == DISTRIBUTE_BY_HASH ||
                   partitionMethod == DISTRIBUTE_BY_RANGE ||
                   partitionMethod == DISTRIBUTE_BY_APPEND;
        }

        case REFERENCE_TABLE:
        {
            return tableEntry->partitionMethod == DISTRIBUTE_BY_NONE &&
                   tableEntry->replicationModel == REPLICATION_MODEL_2PC;
            return partitionMethod == DISTRIBUTE_BY_NONE &&
                   replicationModel == REPLICATION_MODEL_2PC;
        }

        case CITUS_LOCAL_TABLE:
        {
            return tableEntry->partitionMethod == DISTRIBUTE_BY_NONE &&
                   tableEntry->replicationModel != REPLICATION_MODEL_2PC;
            return partitionMethod == DISTRIBUTE_BY_NONE &&
                   replicationModel != REPLICATION_MODEL_2PC;
        }

        case CITUS_TABLE_WITH_NO_DIST_KEY:
        {
            return tableEntry->partitionMethod == DISTRIBUTE_BY_NONE;
            return partitionMethod == DISTRIBUTE_BY_NONE;
        }

        case ANY_CITUS_TABLE_TYPE:

@@ -3706,12 +3708,25 @@ CitusTableTypeIdList(CitusTableType citusTableType)
    while (HeapTupleIsValid(heapTuple))
    {
        bool isNull = false;
        Datum relationIdDatum = heap_getattr(heapTuple,
                                             Anum_pg_dist_partition_logicalrelid,
                                             tupleDescriptor, &isNull);
        Oid relationId = DatumGetObjectId(relationIdDatum);
        if (IsCitusTableType(relationId, citusTableType))

        Datum partMethodDatum =
            heap_getattr(heapTuple, Anum_pg_dist_partition_partmethod,
                         tupleDescriptor, &isNull);
        Datum replicationModelDatum =
            heap_getattr(heapTuple, Anum_pg_dist_partition_repmodel,
                         tupleDescriptor, &isNull);

        char partitionMethod = DatumGetChar(partMethodDatum);
        char replicationModel = DatumGetChar(replicationModelDatum);

        if (IsCitusTableTypeInternal(partitionMethod, replicationModel, citusTableType))
        {
            Datum relationIdDatum = heap_getattr(heapTuple,
                                                 Anum_pg_dist_partition_logicalrelid,
                                                 tupleDescriptor, &isNull);

            Oid relationId = DatumGetObjectId(relationIdDatum);

            relationIdList = lappend_oid(relationIdList, relationId);
        }

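With IsCitusTableTypeInternal now working on the raw catalog characters, the classification can be read straight off a pg_dist_partition tuple without building a full cache entry. A hypothetical summary of the inputs, assuming the usual Citus catalog encodings (not code from this change):

/*
 * Sketch of the classification inputs, assuming the usual encodings:
 *   partitionMethod 'h' (DISTRIBUTE_BY_HASH)            -> HASH_DISTRIBUTED
 *   partitionMethod 'a' (DISTRIBUTE_BY_APPEND)          -> APPEND_DISTRIBUTED
 *   partitionMethod 'r' (DISTRIBUTE_BY_RANGE)           -> RANGE_DISTRIBUTED
 *   partitionMethod 'n' + replicationModel 't' (2PC)    -> REFERENCE_TABLE
 *   partitionMethod 'n' + any other replicationModel    -> CITUS_LOCAL_TABLE
 */
char partitionMethod = 'n';
char replicationModel = 't';
/* would classify as a reference table under the rules above */
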
@@ -14,6 +14,7 @@
#include "postgres.h"
#include "miscadmin.h"

#include <signal.h>
#include <sys/stat.h>
#include <unistd.h>


@@ -28,6 +29,7 @@
#include "catalog/pg_foreign_server.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_type.h"
#include "commands/async.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/commands.h"
#include "distributed/deparser.h"

@@ -35,6 +37,7 @@
#include "distributed/listutils.h"
#include "distributed/metadata_utility.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/maintenanced.h"
#include "distributed/metadata_cache.h"
#include "distributed/metadata_sync.h"
#include "distributed/metadata/distobject.h"

@@ -48,11 +51,15 @@
#include "foreign/foreign.h"
#include "miscadmin.h"
#include "nodes/pg_list.h"
#include "pgstat.h"
#include "postmaster/bgworker.h"
#include "postmaster/postmaster.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"


@@ -76,11 +83,18 @@ static GrantStmt * GenerateGrantOnSchemaStmtForRights(Oid roleOid,
                                                      char *permission,
                                                      bool withGrantOption);
static char * GenerateSetRoleQuery(Oid roleOid);
static void MetadataSyncSigTermHandler(SIGNAL_ARGS);
static void MetadataSyncSigAlrmHandler(SIGNAL_ARGS);

PG_FUNCTION_INFO_V1(start_metadata_sync_to_node);
PG_FUNCTION_INFO_V1(stop_metadata_sync_to_node);
PG_FUNCTION_INFO_V1(worker_record_sequence_dependency);

static bool got_SIGTERM = false;
static bool got_SIGALRM = false;

#define METADATA_SYNC_APP_NAME "Citus Metadata Sync Daemon"


/*
 * start_metadata_sync_to_node function sets hasmetadata column of the given

@@ -1481,7 +1495,7 @@ DetachPartitionCommandList(void)
 * metadata workers that are out of sync. Returns the result of
 * synchronization.
 */
MetadataSyncResult
static MetadataSyncResult
SyncMetadataToNodes(void)
{
    MetadataSyncResult result = METADATA_SYNC_SUCCESS;

@@ -1511,6 +1525,9 @@ SyncMetadataToNodes(void)

        if (!SyncMetadataSnapshotToNode(workerNode, raiseInterrupts))
        {
            ereport(WARNING, (errmsg("failed to sync metadata to %s:%d",
                                     workerNode->workerName,
                                     workerNode->workerPort)));
            result = METADATA_SYNC_FAILED_SYNC;
        }
        else

@@ -1523,3 +1540,244 @@ SyncMetadataToNodes(void)

    return result;
}


/*
 * SyncMetadataToNodesMain is the main function for syncing metadata to
 * MX nodes. It retries until success and then exits.
 */
void
SyncMetadataToNodesMain(Datum main_arg)
{
    Oid databaseOid = DatumGetObjectId(main_arg);

    /* extension owner is passed via bgw_extra */
    Oid extensionOwner = InvalidOid;
    memcpy_s(&extensionOwner, sizeof(extensionOwner),
             MyBgworkerEntry->bgw_extra, sizeof(Oid));

    pqsignal(SIGTERM, MetadataSyncSigTermHandler);
    pqsignal(SIGALRM, MetadataSyncSigAlrmHandler);
    BackgroundWorkerUnblockSignals();

    /* connect to database, after that we can actually access catalogs */
    BackgroundWorkerInitializeConnectionByOid(databaseOid, extensionOwner, 0);

    /* make worker recognizable in pg_stat_activity */
    pgstat_report_appname(METADATA_SYNC_APP_NAME);

    bool syncedAllNodes = false;

    while (!syncedAllNodes)
    {
        InvalidateMetadataSystemCache();
        StartTransactionCommand();

        /*
         * Some functions in ruleutils.c, which we use to get the DDL for
         * metadata propagation, require an active snapshot.
         */
        PushActiveSnapshot(GetTransactionSnapshot());

        if (!LockCitusExtension())
        {
            ereport(DEBUG1, (errmsg("could not lock the citus extension, "
                                    "skipping metadata sync")));
        }
        else if (CheckCitusVersion(DEBUG1) && CitusHasBeenLoaded())
        {
            UseCoordinatedTransaction();
            MetadataSyncResult result = SyncMetadataToNodes();

            syncedAllNodes = (result == METADATA_SYNC_SUCCESS);

            /* we use LISTEN/NOTIFY to wait for metadata syncing in tests */
            if (result != METADATA_SYNC_FAILED_LOCK)
            {
                Async_Notify(METADATA_SYNC_CHANNEL, NULL);
            }
        }

        PopActiveSnapshot();
        CommitTransactionCommand();
        ProcessCompletedNotifies();

        if (syncedAllNodes)
        {
            break;
        }

        /*
         * If the backend is cancelled (e.g. because of a distributed deadlock),
         * CHECK_FOR_INTERRUPTS() will raise a cancellation error which will
         * result in exit(1).
         */
        CHECK_FOR_INTERRUPTS();

        /*
         * SIGTERM is used for when the maintenance daemon tries to clean up
         * metadata sync daemons spawned by terminated maintenance daemons.
         */
        if (got_SIGTERM)
        {
            exit(0);
        }

        /*
         * SIGALRM is used for testing purposes and it simulates an error in the
         * metadata sync daemon.
         */
        if (got_SIGALRM)
        {
            elog(ERROR, "Error in metadata sync daemon");
        }

        pg_usleep(MetadataSyncRetryInterval * 1000);
    }
}


/*
 * MetadataSyncSigTermHandler sets a flag to request termination of the metadata
 * sync daemon.
 */
static void
MetadataSyncSigTermHandler(SIGNAL_ARGS)
{
    int save_errno = errno;

    got_SIGTERM = true;
    if (MyProc != NULL)
    {
        SetLatch(&MyProc->procLatch);
    }

    errno = save_errno;
}


/*
 * MetadataSyncSigAlrmHandler sets a flag to request an error in the metadata
 * sync daemon. This is used for testing purposes.
 */
static void
MetadataSyncSigAlrmHandler(SIGNAL_ARGS)
{
    int save_errno = errno;

    got_SIGALRM = true;
    if (MyProc != NULL)
    {
        SetLatch(&MyProc->procLatch);
    }

    errno = save_errno;
}


/*
 * SpawnSyncMetadataToNodes starts a background worker which runs metadata
 * sync. On success it returns the worker's handle. Otherwise it returns NULL.
 */
BackgroundWorkerHandle *
SpawnSyncMetadataToNodes(Oid database, Oid extensionOwner)
{
    BackgroundWorker worker;
    BackgroundWorkerHandle *handle = NULL;

    /* Configure a worker. */
    memset(&worker, 0, sizeof(worker));
    SafeSnprintf(worker.bgw_name, BGW_MAXLEN,
                 "Citus Metadata Sync: %u/%u",
                 database, extensionOwner);
    worker.bgw_flags =
        BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;

    /* don't restart, we manage restarts from maintenance daemon */
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    strcpy_s(worker.bgw_library_name, sizeof(worker.bgw_library_name), "citus");
    strcpy_s(worker.bgw_function_name, sizeof(worker.bgw_function_name),
             "SyncMetadataToNodesMain");
    worker.bgw_main_arg = ObjectIdGetDatum(MyDatabaseId);
    memcpy_s(worker.bgw_extra, sizeof(worker.bgw_extra), &extensionOwner,
             sizeof(Oid));
    worker.bgw_notify_pid = MyProcPid;

    if (!RegisterDynamicBackgroundWorker(&worker, &handle))
    {
        return NULL;
    }

    pid_t pid;
    WaitForBackgroundWorkerStartup(handle, &pid);

    return handle;
}


/*
 * SignalMetadataSyncDaemon signals metadata sync daemons belonging to
 * the given database.
 */
void
SignalMetadataSyncDaemon(Oid database, int sig)
{
    int backendCount = pgstat_fetch_stat_numbackends();
    for (int backend = 1; backend <= backendCount; backend++)
    {
        LocalPgBackendStatus *localBeEntry = pgstat_fetch_stat_local_beentry(backend);
        if (!localBeEntry)
        {
            continue;
        }

        PgBackendStatus *beStatus = &localBeEntry->backendStatus;
        if (beStatus->st_databaseid == database &&
            strncmp(beStatus->st_appname, METADATA_SYNC_APP_NAME, BGW_MAXLEN) == 0)
        {
            kill(beStatus->st_procpid, sig);
        }
    }
}


/*
 * ShouldInitiateMetadataSync returns whether the metadata sync daemon should be
 * initiated. It sets lockFailure to true if the pg_dist_node lock couldn't be
 * acquired for the check.
 */
bool
ShouldInitiateMetadataSync(bool *lockFailure)
{
    if (!IsCoordinator())
    {
        *lockFailure = false;
        return false;
    }

    Oid distNodeOid = DistNodeRelationId();
    if (!ConditionalLockRelationOid(distNodeOid, AccessShareLock))
    {
        *lockFailure = true;
        return false;
    }

    bool shouldSyncMetadata = false;

    List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
    WorkerNode *workerNode = NULL;
    foreach_ptr(workerNode, workerList)
    {
        if (workerNode->hasMetadata && !workerNode->metadataSynced)
        {
            shouldSyncMetadata = true;
            break;
        }
    }

    UnlockRelationOid(distNodeOid, AccessShareLock);

    *lockFailure = false;
    return shouldSyncMetadata;
}

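Both handlers above follow the canonical PostgreSQL background-worker pattern: do almost nothing inside the handler, only set a flag, wake the process through its latch, and preserve errno. A distilled sketch of that pattern with hypothetical names (note the canonical form declares the flag volatile sig_atomic_t, which the handlers in this diff approximate with a plain bool):

static volatile sig_atomic_t wakeup_requested = false;

static void
my_wakeup_handler(SIGNAL_ARGS)
{
    int save_errno = errno;            /* SetLatch may clobber errno */

    wakeup_requested = true;           /* examined later in the main loop */
    if (MyProc != NULL)
    {
        SetLatch(&MyProc->procLatch);  /* interrupt any pending WaitLatch() */
    }

    errno = save_errno;
}
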
@@ -443,7 +443,7 @@ SetUpDistributedTableDependencies(WorkerNode *newWorkerNode)
    {
        MarkNodeHasMetadata(newWorkerNode->workerName, newWorkerNode->workerPort,
                            true);
        TriggerMetadataSync(MyDatabaseId);
        TriggerMetadataSyncOnCommit();
    }
}
}

@@ -809,7 +809,7 @@ master_update_node(PG_FUNCTION_ARGS)
     */
    if (UnsetMetadataSyncedForAll())
    {
        TriggerMetadataSync(MyDatabaseId);
        TriggerMetadataSyncOnCommit();
    }

    if (handle != NULL)

@@ -1063,7 +1063,7 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)

    INSTR_TIME_SET_CURRENT(planStart);

    PlannedStmt *plan = pg_plan_query_compat(query, NULL, 0, NULL);
    PlannedStmt *plan = pg_plan_query_compat(query, NULL, CURSOR_OPT_PARALLEL_OK, NULL);

    INSTR_TIME_SET_CURRENT(planDuration);
    INSTR_TIME_SUBTRACT(planDuration, planStart);

@@ -16,6 +16,7 @@
#include "catalog/pg_type.h"
#include "distributed/connection_management.h"
#include "distributed/listutils.h"
#include "distributed/maintenanced.h"
#include "distributed/metadata_sync.h"
#include "distributed/remote_commands.h"
#include "postmaster/postmaster.h"

@@ -28,6 +29,8 @@
/* declarations for dynamic loading */
PG_FUNCTION_INFO_V1(master_metadata_snapshot);
PG_FUNCTION_INFO_V1(wait_until_metadata_sync);
PG_FUNCTION_INFO_V1(trigger_metadata_sync);
PG_FUNCTION_INFO_V1(raise_error_in_metadata_sync);


/*

@@ -124,3 +127,26 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)

    PG_RETURN_VOID();
}


/*
 * trigger_metadata_sync triggers metadata sync for testing.
 */
Datum
trigger_metadata_sync(PG_FUNCTION_ARGS)
{
    TriggerMetadataSyncOnCommit();
    PG_RETURN_VOID();
}


/*
 * raise_error_in_metadata_sync causes metadata sync to raise an error.
 */
Datum
raise_error_in_metadata_sync(PG_FUNCTION_ARGS)
{
    /* metadata sync uses SIGALRM to test errors */
    SignalMetadataSyncDaemon(MyDatabaseId, SIGALRM);
    PG_RETURN_VOID();
}

@@ -28,6 +28,7 @@
#include "distributed/listutils.h"
#include "distributed/local_executor.h"
#include "distributed/locally_reserved_shared_connections.h"
#include "distributed/maintenanced.h"
#include "distributed/multi_executor.h"
#include "distributed/multi_explain.h"
#include "distributed/repartition_join_execution.h"

@@ -102,6 +103,9 @@ bool CoordinatedTransactionUses2PC = false;
/* if disabled, distributed statements in a function may run as separate transactions */
bool FunctionOpensTransactionBlock = true;

/* if true, we should trigger metadata sync on commit */
bool MetadataSyncOnCommit = false;


/* transaction management functions */
static void CoordinatedTransactionCallback(XactEvent event, void *arg);

@@ -262,6 +266,15 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
                AfterXactConnectionHandling(true);
            }

            /*
             * Changes to catalog tables are now visible to the metadata sync
             * daemon, so we can trigger metadata sync if necessary.
             */
            if (MetadataSyncOnCommit)
            {
                TriggerMetadataSync(MyDatabaseId);
            }

            ResetGlobalVariables();

            /*

@@ -474,6 +487,7 @@ ResetGlobalVariables()
    activeSetStmts = NULL;
    CoordinatedTransactionUses2PC = false;
    TransactionModifiedNodeMetadata = false;
    MetadataSyncOnCommit = false;
    ResetWorkerErrorIndication();
}


@@ -728,3 +742,15 @@ MaybeExecutingUDF(void)
{
    return ExecutorLevel > 1 || (ExecutorLevel == 1 && PlannerLevel > 0);
}


/*
 * TriggerMetadataSyncOnCommit sets a flag to do metadata sync on commit.
 * This is because new metadata only becomes visible to the metadata sync
 * daemon after commit happens.
 */
void
TriggerMetadataSyncOnCommit(void)
{
    MetadataSyncOnCommit = true;
}

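TriggerMetadataSyncOnCommit and the commit-callback hunk above implement a small defer-until-commit mechanism. A generic sketch of the same idea, with hypothetical names (RegisterXactCallback is the stock PostgreSQL API; the real hook point in this diff is the existing CoordinatedTransactionCallback):

static bool workPendingOnCommit = false;

void
RequestWorkOnCommit(void)
{
    workPendingOnCommit = true;      /* acted upon only after a successful commit */
}

static void
myXactCallback(XactEvent event, void *arg)
{
    if (event == XACT_EVENT_COMMIT && workPendingOnCommit)
    {
        /* the transaction's catalog changes are now visible to other backends */
        DoDeferredWork();            /* hypothetical; compare TriggerMetadataSync() */
    }

    /* reset on commit and abort alike so the flag never leaks */
    workPendingOnCommit = false;
}

/* during module initialization: RegisterXactCallback(myXactCallback, NULL); */
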
@@ -118,7 +118,6 @@ static size_t MaintenanceDaemonShmemSize(void);
static void MaintenanceDaemonShmemInit(void);
static void MaintenanceDaemonShmemExit(int code, Datum arg);
static void MaintenanceDaemonErrorContext(void *arg);
static bool LockCitusExtension(void);
static bool MetadataSyncTriggeredCheckAndReset(MaintenanceDaemonDBData *dbData);
static void WarnMaintenanceDaemonNotStarted(void);


@@ -291,6 +290,13 @@ CitusMaintenanceDaemonMain(Datum main_arg)
    TimestampTz lastRecoveryTime = 0;
    TimestampTz nextMetadataSyncTime = 0;


    /*
     * We do metadata sync in a separate background worker. We need its
     * handle to be able to check its status.
     */
    BackgroundWorkerHandle *metadataSyncBgwHandle = NULL;

    /*
     * Look up this worker's configuration.
     */

@@ -371,6 +377,12 @@ CitusMaintenanceDaemonMain(Datum main_arg)
    /* make worker recognizable in pg_stat_activity */
    pgstat_report_appname("Citus Maintenance Daemon");

    /*
     * Terminate orphaned metadata sync daemons spawned from previously terminated
     * or crashed maintenanced instances.
     */
    SignalMetadataSyncDaemon(databaseOid, SIGTERM);

    /* enter main loop */
    while (!got_SIGTERM)
    {

@@ -450,21 +462,42 @@ CitusMaintenanceDaemonMain(Datum main_arg)
        }
#endif

        if (!RecoveryInProgress() &&
            (MetadataSyncTriggeredCheckAndReset(myDbData) ||
             GetCurrentTimestamp() >= nextMetadataSyncTime))
        pid_t metadataSyncBgwPid = 0;
        BgwHandleStatus metadataSyncStatus =
            metadataSyncBgwHandle != NULL ?
            GetBackgroundWorkerPid(metadataSyncBgwHandle, &metadataSyncBgwPid) :
            BGWH_STOPPED;

        if (metadataSyncStatus != BGWH_STOPPED &&
            GetCurrentTimestamp() >= nextMetadataSyncTime)
        {
            bool metadataSyncFailed = false;
            /*
             * Metadata sync is still running, recheck in a short while.
             */
            int nextTimeout = MetadataSyncRetryInterval;
            nextMetadataSyncTime =
                TimestampTzPlusMilliseconds(GetCurrentTimestamp(), nextTimeout);
            timeout = Min(timeout, nextTimeout);
        }
        else if (!RecoveryInProgress() &&
                 metadataSyncStatus == BGWH_STOPPED &&
                 (MetadataSyncTriggeredCheckAndReset(myDbData) ||
                  GetCurrentTimestamp() >= nextMetadataSyncTime))
        {
            if (metadataSyncBgwHandle)
            {
                TerminateBackgroundWorker(metadataSyncBgwHandle);
                pfree(metadataSyncBgwHandle);
                metadataSyncBgwHandle = NULL;
            }

            InvalidateMetadataSystemCache();
            StartTransactionCommand();

            /*
             * Some functions in ruleutils.c, which we use to get the DDL for
             * metadata propagation, require an active snapshot.
             */
            PushActiveSnapshot(GetTransactionSnapshot());

            int nextTimeout = MetadataSyncRetryInterval;
            bool syncMetadata = false;

            if (!LockCitusExtension())
            {
                ereport(DEBUG1, (errmsg("could not lock the citus extension, "

@@ -472,25 +505,28 @@ CitusMaintenanceDaemonMain(Datum main_arg)
            }
            else if (CheckCitusVersion(DEBUG1) && CitusHasBeenLoaded())
            {
                MetadataSyncResult result = SyncMetadataToNodes();
                metadataSyncFailed = (result != METADATA_SYNC_SUCCESS);
                bool lockFailure = false;
                syncMetadata = ShouldInitiateMetadataSync(&lockFailure);

                /*
                 * Notification means we had an attempt on synchronization
                 * without being blocked for pg_dist_node access.
                 * If lock fails, we need to recheck in a short while. If we are
                 * going to sync metadata, we should recheck in a short while to
                 * see if it failed. Otherwise, we can wait longer.
                 */
                if (result != METADATA_SYNC_FAILED_LOCK)
                {
                    Async_Notify(METADATA_SYNC_CHANNEL, NULL);
                }
                nextTimeout = (lockFailure || syncMetadata) ?
                              MetadataSyncRetryInterval :
                              MetadataSyncInterval;
            }

            PopActiveSnapshot();
            CommitTransactionCommand();
            ProcessCompletedNotifies();

            int64 nextTimeout = metadataSyncFailed ? MetadataSyncRetryInterval :
                                MetadataSyncInterval;
            if (syncMetadata)
            {
                metadataSyncBgwHandle =
                    SpawnSyncMetadataToNodes(MyDatabaseId, myDbData->userOid);
            }

            nextMetadataSyncTime =
                TimestampTzPlusMilliseconds(GetCurrentTimestamp(), nextTimeout);
            timeout = Min(timeout, nextTimeout);

@@ -626,6 +662,11 @@ CitusMaintenanceDaemonMain(Datum main_arg)
            ProcessConfigFile(PGC_SIGHUP);
        }
    }

    if (metadataSyncBgwHandle)
    {
        TerminateBackgroundWorker(metadataSyncBgwHandle);
    }
}


@@ -786,7 +827,7 @@ MaintenanceDaemonErrorContext(void *arg)
 * LockCitusExtension acquires a lock on the Citus extension or returns
 * false if the extension does not exist or is being dropped.
 */
static bool
bool
LockCitusExtension(void)
{
    Oid extensionOid = get_extension_oid("citus", true);

@@ -41,7 +41,7 @@ alter_role_if_exists(PG_FUNCTION_ARGS)

    Node *parseTree = ParseTreeNode(utilityQuery);

    CitusProcessUtility(parseTree, utilityQuery, PROCESS_UTILITY_TOPLEVEL, NULL,
    CitusProcessUtility(parseTree, utilityQuery, PROCESS_UTILITY_QUERY, NULL,
                        None_Receiver, NULL);

    PG_RETURN_BOOL(true);

@@ -98,7 +98,7 @@ worker_create_or_alter_role(PG_FUNCTION_ARGS)

        CitusProcessUtility(parseTree,
                            createRoleUtilityQuery,
                            PROCESS_UTILITY_TOPLEVEL,
                            PROCESS_UTILITY_QUERY,
                            NULL,
                            None_Receiver, NULL);


@@ -126,7 +126,7 @@ worker_create_or_alter_role(PG_FUNCTION_ARGS)

        CitusProcessUtility(parseTree,
                            alterRoleUtilityQuery,
                            PROCESS_UTILITY_TOPLEVEL,
                            PROCESS_UTILITY_QUERY,
                            NULL,
                            None_Receiver, NULL);

@@ -465,7 +465,7 @@ SingleReplicatedTable(Oid relationId)
    List *shardPlacementList = NIL;

    /* we could have append/range distributed tables without shards */
    if (list_length(shardList) <= 1)
    if (list_length(shardList) == 0)
    {
        return false;
    }

@@ -111,12 +111,12 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS)
        RenameStmt *renameStmt = CreateRenameStatement(&address, newName);
        const char *sqlRenameStmt = DeparseTreeNode((Node *) renameStmt);
        CitusProcessUtility((Node *) renameStmt, sqlRenameStmt,
                            PROCESS_UTILITY_TOPLEVEL,
                            PROCESS_UTILITY_QUERY,
                            NULL, None_Receiver, NULL);
    }

    /* apply create statement locally */
    CitusProcessUtility(parseTree, sqlStatement, PROCESS_UTILITY_TOPLEVEL, NULL,
    CitusProcessUtility(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL,
                        None_Receiver, NULL);

    /* type has been created */

@@ -396,7 +396,7 @@ worker_apply_shard_ddl_command(PG_FUNCTION_ARGS)

    /* extend names in ddl command and apply extended command */
    RelayEventExtendNames(ddlCommandNode, schemaName, shardId);
    CitusProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_TOPLEVEL, NULL,
    CitusProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_QUERY, NULL,
                        None_Receiver, NULL);

    PG_RETURN_VOID();

@@ -428,7 +428,7 @@ worker_apply_inter_shard_ddl_command(PG_FUNCTION_ARGS)
    RelayEventExtendNamesForInterShardCommands(ddlCommandNode, leftShardId,
                                               leftShardSchemaName, rightShardId,
                                               rightShardSchemaName);
    CitusProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_TOPLEVEL, NULL,
    CitusProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_QUERY, NULL,
                        None_Receiver, NULL);

    PG_RETURN_VOID();

@@ -461,7 +461,7 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS)
    }

    /* run the CREATE SEQUENCE command */
    CitusProcessUtility(commandNode, commandString, PROCESS_UTILITY_TOPLEVEL, NULL,
    CitusProcessUtility(commandNode, commandString, PROCESS_UTILITY_QUERY, NULL,
                        None_Receiver, NULL);
    CommandCounterIncrement();


@@ -669,7 +669,7 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS)
    SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);

    CitusProcessUtility((Node *) localCopyCommand, queryString->data,
                        PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
                        PROCESS_UTILITY_QUERY, NULL, None_Receiver, NULL);

    SetUserIdAndSecContext(savedUserId, savedSecurityContext);


@@ -782,7 +782,7 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName,

    /* since the command is an AlterSeqStmt, a dummy command string works fine */
    CitusProcessUtility((Node *) alterSequenceStatement, dummyString,
                        PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
                        PROCESS_UTILITY_QUERY, NULL, None_Receiver, NULL);
}
}

@@ -20,6 +20,7 @@ typedef struct CitusScanState
    CustomScanState customScanState;  /* underlying custom scan node */

    /* function that gets called before postgres starts its execution */
    bool finishedPreScan;             /* flag to check if the pre scan is finished */
    void (*PreExecScan)(struct CitusScanState *scanState);

    DistributedPlan *distributedPlan; /* distributed execution plan */

@@ -64,6 +64,26 @@ typedef enum ExtractForeignKeyConstraintsMode
    EXCLUDE_SELF_REFERENCES = 1 << 2
} ExtractForeignKeyConstraintMode;


/*
 * Flags that can be passed to GetForeignKeyIdsForColumn to
 * indicate whether relationId argument should match:
 *   - referencing relation or,
 *   - referenced relation,
 * or we are searching for both sides.
 */
typedef enum SearchForeignKeyColumnFlags
{
    /* relationId argument should match referencing relation */
    SEARCH_REFERENCING_RELATION = 1 << 0,

    /* relationId argument should match referenced relation */
    SEARCH_REFERENCED_RELATION = 1 << 1,

    /* callers can also pass union of above flags */
} SearchForeignKeyColumnFlags;


/* cluster.c - forward declarations */
extern List * PreprocessClusterStmt(Node *node, const char *clusterCommand);


@@ -119,15 +139,21 @@ extern void ErrorIfUnsupportedForeignConstraintExists(Relation relation,
                                                      Var *distributionColumn,
                                                      uint32 colocationId);
extern void ErrorOutForFKeyBetweenPostgresAndCitusLocalTable(Oid localTableId);
extern bool ColumnReferencedByAnyForeignKey(char *columnName, Oid relationId);
extern bool ColumnAppearsInForeignKey(char *columnName, Oid relationId);
extern bool ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid
                                                      relationId);
extern List * GetReferencingForeignConstaintCommands(Oid relationOid);
extern bool AnyForeignKeyDependsOnIndex(Oid indexId);
extern bool HasForeignKeyToCitusLocalTable(Oid relationId);
extern bool HasForeignKeyToReferenceTable(Oid relationOid);
extern bool TableReferenced(Oid relationOid);
extern bool TableReferencing(Oid relationOid);
extern bool ConstraintIsAUniquenessConstraint(char *inputConstaintName, Oid relationId);
extern bool ConstraintIsAForeignKey(char *inputConstaintName, Oid relationOid);
extern Oid GetForeignKeyOidByName(char *inputConstaintName, Oid relationId);
extern bool ConstraintWithNameIsOfType(char *inputConstaintName, Oid relationId,
                                       char targetConstraintType);
extern bool ConstraintWithIdIsOfType(Oid constraintId, char targetConstraintType);
extern void ErrorIfTableHasExternalForeignKeys(Oid relationId);
extern List * GetForeignKeyOids(Oid relationId, int flags);
extern Oid GetReferencedTableId(Oid foreignKeyId);

@@ -178,6 +178,9 @@ typedef struct ConnectionHashEntry
{
    ConnectionHashKey key;
    dlist_head *connections;

    /* whether the connections list is valid */
    bool isValid;
} ConnectionHashEntry;

/* hash entry for cached connection parameters */

@@ -25,6 +25,7 @@ extern void StopMaintenanceDaemon(Oid databaseId);
extern void TriggerMetadataSync(Oid databaseId);
extern void InitializeMaintenanceDaemon(void);
extern void InitializeMaintenanceDaemonBackend(void);
extern bool LockCitusExtension(void);

extern void CitusMaintenanceDaemonMain(Datum main_arg);

@@ -21,6 +21,8 @@ extern List * GetUniqueDependenciesList(List *objectAddressesList);
extern List * GetDependenciesForObject(const ObjectAddress *target);
extern List * OrderObjectAddressListInDependencyOrder(List *objectAddressList);
extern bool SupportedDependencyByCitus(const ObjectAddress *address);
extern List * GetPgDependTuplesForDependingObjects(Oid targetObjectClassId,
                                                   Oid targetObjectId);
extern List * GetDependingViews(Oid relationId);

#endif /* CITUS_DEPENDENCY_H */

@@ -50,11 +50,14 @@ extern char * PlacementUpsertCommand(uint64 shardId, uint64 placementId, int sha
extern void CreateTableMetadataOnWorkers(Oid relationId);
extern void MarkNodeHasMetadata(const char *nodeName, int32 nodePort, bool hasMetadata);
extern void MarkNodeMetadataSynced(const char *nodeName, int32 nodePort, bool synced);
extern MetadataSyncResult SyncMetadataToNodes(void);
extern BackgroundWorkerHandle * SpawnSyncMetadataToNodes(Oid database, Oid owner);
extern bool SendOptionalCommandListToWorkerInTransaction(const char *nodeName, int32
                                                         nodePort,
                                                         const char *nodeUser,
                                                         List *commandList);
extern void SyncMetadataToNodesMain(Datum main_arg);
extern void SignalMetadataSyncDaemon(Oid database, int sig);
extern bool ShouldInitiateMetadataSync(bool *lockFailure);

#define DELETE_ALL_NODES "TRUNCATE pg_dist_node CASCADE"
#define REMOVE_ALL_CLUSTERED_TABLES_COMMAND \

@@ -121,6 +121,7 @@ extern void InitializeTransactionManagement(void);
/* other functions */
extern List * ActiveSubXactContexts(void);
extern StringInfo BeginAndSetDistributedTransactionIdCommand(void);
extern void TriggerMetadataSyncOnCommit(void);


#endif /* TRANSACTION_MANAGMENT_H */

@@ -181,3 +181,6 @@ s/wrong data type: [0-9]+, expected [0-9]+/wrong data type: XXXX, expected XXXX/

# Errors with relation OID does not exist
s/relation with OID [0-9]+ does not exist/relation with OID XXXX does not exist/g

# ignore DEBUG1 messages that Postgres generates
/^DEBUG: rehashing catalog cache id [0-9]+$/d

@ -0,0 +1,218 @@
|
|||
CREATE SCHEMA cursors;
|
||||
SET search_path TO cursors;
|
||||
CREATE TABLE distributed_table (key int, value text);
|
||||
SELECT create_distributed_table('distributed_table', 'key');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- load some data, but not very small amounts because RETURN QUERY in plpgsql
|
||||
-- hard codes the cursor fetch to 50 rows on PG 12, though they might increase
|
||||
-- it sometime in the future, so be mindful
|
||||
INSERT INTO distributed_table SELECT i % 10, i::text FROM generate_series(0, 1000) i;
|
||||
CREATE OR REPLACE FUNCTION simple_cursor_on_dist_table(cursor_name refcursor) RETURNS refcursor AS '
|
||||
BEGIN
|
||||
OPEN $1 FOR SELECT DISTINCT key FROM distributed_table ORDER BY 1;
|
||||
RETURN $1;
|
||||
END;
|
||||
' LANGUAGE plpgsql;
|
||||
CREATE OR REPLACE FUNCTION cursor_with_intermediate_result_on_dist_table(cursor_name refcursor) RETURNS refcursor AS '
|
||||
BEGIN
|
||||
OPEN $1 FOR
|
||||
WITH cte_1 AS (SELECT * FROM distributed_table OFFSET 0)
|
||||
SELECT DISTINCT key FROM distributed_table WHERE value in (SELECT value FROM cte_1) ORDER BY 1;
|
||||
RETURN $1;
|
||||
END;
|
||||
' LANGUAGE plpgsql;
|
||||
CREATE OR REPLACE FUNCTION cursor_with_intermediate_result_on_dist_table_with_param(cursor_name refcursor, filter text) RETURNS refcursor AS '
|
||||
BEGIN
|
||||
OPEN $1 FOR
|
||||
WITH cte_1 AS (SELECT * FROM distributed_table WHERE value < $2 OFFSET 0)
|
||||
SELECT DISTINCT key FROM distributed_table WHERE value in (SELECT value FROM cte_1) ORDER BY 1;
|
||||
RETURN $1;
|
||||
END;
|
||||
' LANGUAGE plpgsql;
|
||||
-- pretty basic query with cursors
|
||||
-- Citus should plan/execute once and pull
|
||||
-- the results to coordinator, then serve it
|
||||
-- from the coordinator
|
||||
BEGIN;
|
||||
SELECT simple_cursor_on_dist_table('cursor_1');
|
||||
simple_cursor_on_dist_table
|
||||
---------------------------------------------------------------------
|
||||
cursor_1
|
||||
(1 row)
|
||||
|
||||
SET LOCAL citus.log_intermediate_results TO ON;
|
||||
SET LOCAL client_min_messages TO DEBUG1;
|
||||
FETCH 5 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
(5 rows)
|
||||
|
||||
FETCH 50 IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
6
|
||||
7
|
||||
8
|
||||
9
|
||||
(5 rows)
|
||||
|
||||
FETCH ALL IN cursor_1;
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
SELECT cursor_with_intermediate_result_on_dist_table('cursor_1');
|
||||
cursor_with_intermediate_result_on_dist_table
|
||||
---------------------------------------------------------------------
|
||||
cursor_1
|
||||
(1 row)
|
||||
|
||||
-- multiple FETCH commands should not trigger re-running the subplans
|
||||
SET LOCAL citus.log_intermediate_results TO ON;
|
||||
SET LOCAL client_min_messages TO DEBUG1;
|
||||
FETCH 5 IN cursor_1;
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
key
---------------------------------------------------------------------
0
1
2
3
4
(5 rows)

FETCH 1 IN cursor_1;
key
---------------------------------------------------------------------
5
(1 row)

FETCH ALL IN cursor_1;
key
---------------------------------------------------------------------
6
7
8
9
(4 rows)

FETCH 5 IN cursor_1;
key
---------------------------------------------------------------------
(0 rows)

COMMIT;
BEGIN;
SELECT cursor_with_intermediate_result_on_dist_table_with_param('cursor_1', '600');
cursor_with_intermediate_result_on_dist_table_with_param
---------------------------------------------------------------------
cursor_1
(1 row)

-- multiple FETCH commands should not trigger re-running the subplans
-- also test with parameters
SET LOCAL citus.log_intermediate_results TO ON;
SET LOCAL client_min_messages TO DEBUG1;
FETCH 1 IN cursor_1;
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
key
---------------------------------------------------------------------
0
(1 row)

FETCH 1 IN cursor_1;
key
---------------------------------------------------------------------
1
(1 row)

FETCH 1 IN cursor_1;
key
---------------------------------------------------------------------
2
(1 row)

FETCH 1 IN cursor_1;
key
---------------------------------------------------------------------
3
(1 row)

FETCH 1 IN cursor_1;
key
---------------------------------------------------------------------
4
(1 row)

FETCH 1 IN cursor_1;
key
---------------------------------------------------------------------
5
(1 row)

FETCH ALL IN cursor_1;
key
---------------------------------------------------------------------
6
7
8
9
(4 rows)

COMMIT;
CREATE OR REPLACE FUNCTION value_counter() RETURNS TABLE(counter text) LANGUAGE PLPGSQL AS $function$
BEGIN
    return query
        WITH cte AS
            (SELECT dt.value
             FROM distributed_table dt
             WHERE dt.value in
                 (SELECT value
                  FROM distributed_table p
                  GROUP BY p.value
                  HAVING count(*) > 0))

        SELECT * FROM cte;
END;
$function$ ;
SET citus.log_intermediate_results TO ON;
SET client_min_messages TO DEBUG1;
\set VERBOSITY terse
SELECT count(*) from (SELECT value_counter()) as foo;
DEBUG: CTE cte is going to be inlined via distributed planning
DEBUG: generating subplan XXX_1 for subquery SELECT value FROM cursors.distributed_table p GROUP BY value HAVING (count(*) OPERATOR(pg_catalog.>) 0)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT value FROM (SELECT dt.value FROM cursors.distributed_table dt WHERE (dt.value OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)))) cte
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
count
---------------------------------------------------------------------
1001
(1 row)

BEGIN;
SELECT count(*) from (SELECT value_counter()) as foo;
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
count
---------------------------------------------------------------------
1001
(1 row)

COMMIT;
-- suppress NOTICEs
SET client_min_messages TO ERROR;
DROP SCHEMA cursors CASCADE;
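
The behavior verified above can be reproduced by hand: within one transaction, a cursor over a distributed query that needs an intermediate result broadcasts the subplan once, on the first FETCH, and later FETCHes are served from the already-materialized result. A minimal sketch, assuming a distributed_table like the one in this test (sketch_cursor is an illustrative name, not part of the test):

BEGIN;
SET LOCAL client_min_messages TO DEBUG1;
DECLARE sketch_cursor CURSOR FOR
    WITH cte_1 AS (SELECT * FROM distributed_table OFFSET 0)
    SELECT DISTINCT key FROM distributed_table
    WHERE value IN (SELECT value FROM cte_1) ORDER BY 1;
FETCH 1 FROM sketch_cursor;   -- the "Subplan ... will be sent" DEBUG lines appear here, once
FETCH ALL FROM sketch_cursor; -- no new subplan messages: the result is reused
COMMIT;
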

@ -233,7 +233,7 @@ SELECT * FROM test WHERE x = 1;
ERROR: node group 0 does not have a secondary node
-- add the follower as a secondary node and try again, the SELECT statement
-- should work this time
\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
SET search_path TO single_node;
SELECT 1 FROM master_add_node('localhost', :follower_master_port, groupid => 0, noderole => 'secondary');
?column?

@ -350,7 +350,7 @@ SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);

RESET citus.task_assignment_policy;
-- Cleanup
\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
SET search_path TO single_node;
SET client_min_messages TO WARNING;
DROP SCHEMA single_node CASCADE;

@ -1216,6 +1216,7 @@ ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
sum = enriched.sum + excluded.sum;
DEBUG: rehashing catalog cache id 14 for pg_opclass; 17 tups, 8 buckets at character 224
DEBUG: INSERT target table and the source relation of the SELECT partition column value must be colocated in distributed INSERT ... SELECT
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: performing repartitioned INSERT ... SELECT

@ -28,11 +28,11 @@ step detector-dump-wait-edges:

waiting_transaction_numblocking_transaction_numblocking_transaction_waiting

390           389           f
395           394           f
transactionnumberwaitingtransactionnumbers

389
390           389
394
395           394
step s1-abort:
    ABORT;

@ -75,14 +75,14 @@ step detector-dump-wait-edges:

waiting_transaction_numblocking_transaction_numblocking_transaction_waiting

394           393           f
395           393           f
395           394           t
399           398           f
400           398           f
400           399           t
transactionnumberwaitingtransactionnumbers

393
394           393
395           393,394
398
399           398
400           398,399
step s1-abort:
    ABORT;
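
The fused column headers above are an artifact of the isolation tester's output format; the underlying data is the distributed wait graph. A session can inspect that graph directly with the wait-edge UDF the detector-dump-wait-edges step is built on (a sketch, assuming the dump_global_wait_edges() function that ships with Citus, as these tests use):

SELECT waiting_transaction_num,
       blocking_transaction_num,
       blocking_transaction_waiting
FROM dump_global_wait_edges();
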

@ -0,0 +1,204 @@
Parsed test spec with 3 sessions

starting permutation: enable-deadlock-detection reload-conf s2-start-session-level-connection s1-begin s1-update-1 s2-begin-on-worker s2-update-2-on-worker s2-truncate-on-worker s3-invalidate-metadata s3-resync s3-wait s2-update-1-on-worker s1-update-2 s1-commit s2-commit-on-worker disable-deadlock-detection reload-conf s2-stop-connection
create_distributed_table


step enable-deadlock-detection:
    ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO 1.1;

step reload-conf:
    SELECT pg_reload_conf();

pg_reload_conf

t
step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57638);

start_session_level_connection_to_node


step s1-begin:
    BEGIN;

step s1-update-1:
    UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1;

step s2-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node


step s2-update-2-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2');

run_commands_on_session_level_connection_to_node


step s2-truncate-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('TRUNCATE t2');

run_commands_on_session_level_connection_to_node


step s3-invalidate-metadata:
    update pg_dist_node SET metadatasynced = false;

step s3-resync:
    SELECT trigger_metadata_sync();

trigger_metadata_sync


step s3-wait:
    SELECT pg_sleep(2);

pg_sleep


step s2-update-1-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1');
 <waiting ...>
step s1-update-2:
    UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2;
 <waiting ...>
step s1-update-2: <... completed>
step s2-update-1-on-worker: <... completed>
run_commands_on_session_level_connection_to_node


error in steps s1-update-2 s2-update-1-on-worker: ERROR: canceling the transaction since it was involved in a distributed deadlock
step s1-commit:
    COMMIT;

step s2-commit-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node


step disable-deadlock-detection:
    ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1;

step reload-conf:
    SELECT pg_reload_conf();

pg_reload_conf

t
step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node


restore_isolation_tester_func


starting permutation: increase-retry-interval reload-conf s2-start-session-level-connection s2-begin-on-worker s2-truncate-on-worker s3-invalidate-metadata s3-resync s3-wait s1-count-daemons s1-cancel-metadata-sync s1-count-daemons reset-retry-interval reload-conf s2-commit-on-worker s2-stop-connection s3-resync s3-wait
create_distributed_table


step increase-retry-interval:
    ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 20000;

step reload-conf:
    SELECT pg_reload_conf();

pg_reload_conf

t
step s2-start-session-level-connection:
    SELECT start_session_level_connection_to_node('localhost', 57638);

start_session_level_connection_to_node


step s2-begin-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');

run_commands_on_session_level_connection_to_node


step s2-truncate-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('TRUNCATE t2');

run_commands_on_session_level_connection_to_node


step s3-invalidate-metadata:
    update pg_dist_node SET metadatasynced = false;

step s3-resync:
    SELECT trigger_metadata_sync();

trigger_metadata_sync


step s3-wait:
    SELECT pg_sleep(2);

pg_sleep


step s1-count-daemons:
    SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%';

count

1
step s1-cancel-metadata-sync:
    SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%';
    SELECT pg_sleep(2);

pg_cancel_backend

t
pg_sleep


step s1-count-daemons:
    SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%';

count

0
step reset-retry-interval:
    ALTER SYSTEM RESET citus.metadata_sync_retry_interval;

step reload-conf:
    SELECT pg_reload_conf();

pg_reload_conf

t
step s2-commit-on-worker:
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');

run_commands_on_session_level_connection_to_node


step s2-stop-connection:
    SELECT stop_session_level_connection_to_node();

stop_session_level_connection_to_node


step s3-resync:
    SELECT trigger_metadata_sync();

trigger_metadata_sync


step s3-wait:
    SELECT pg_sleep(2);

pg_sleep


restore_isolation_tester_func

@ -458,7 +458,7 @@ DROP TABLE prev_objects, extension_diff;
SHOW citus.version;
citus.version
---------------------------------------------------------------------
9.5devel
9.5.4
(1 row)

-- ensure no objects were created outside pg_catalog

@ -269,7 +269,7 @@ ERROR: writing to worker nodes is not currently allowed
DETAIL: citus.use_secondary_nodes is set to 'always'
SELECT * FROM citus_local_table ORDER BY a;
ERROR: there is a shard placement in node group 0 but there are no nodes in that group
\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
DROP TABLE the_table;
DROP TABLE reference_table;
DROP TABLE citus_local_table;

@ -77,7 +77,7 @@ order by s_i_id;
SELECT * FROM the_table;
ERROR: node group does not have a secondary node
-- add the secondary nodes and try again, the SELECT statement should work this time
\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port,
    groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_1_port),
    noderole => 'secondary');

@ -149,7 +149,7 @@ order by s_i_id;
ERROR: there is a shard placement in node group but there are no nodes in that group
-- now move the secondary nodes into the new cluster and see that the follower, finally
-- correctly configured, can run select queries involving them
\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
UPDATE pg_dist_node SET nodecluster = 'second-cluster' WHERE noderole = 'secondary';
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'"
SELECT * FROM the_table;

@ -160,6 +160,6 @@ SELECT * FROM the_table;
(2 rows)

-- clean up after ourselves
\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
DROP TABLE the_table;
DROP TABLE stock;

@ -938,5 +938,239 @@ SELECT create_reference_table('self_referencing_reference_table');
(1 row)

ALTER TABLE self_referencing_reference_table ADD CONSTRAINT fk FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column);
-- make sure that if fkey is dropped
-- Citus can see an up-to-date fkey graph
CREATE TABLE dropfkeytest1 (x int unique);
CREATE TABLE dropfkeytest2 (x int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (x) REFERENCES dropfkeytest1(x);
SELECT create_distributed_table ('dropfkeytest1', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
ALTER TABLE dropfkeytest2 DROP CONSTRAINT fkey1;
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
-- make sure that if a column that is in a fkey is dropped
-- Citus can see an up-to-date fkey graph
CREATE TABLE dropfkeytest1 (x int unique);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
SELECT create_distributed_table ('dropfkeytest1', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
ALTER TABLE dropfkeytest2 DROP COLUMN y;
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
-- make sure that even if a column that is in a multi-column index is dropped
-- Citus can see an up-to-date fkey graph
CREATE TABLE dropfkeytest1 (x int, y int);
CREATE UNIQUE INDEX indd ON dropfkeytest1(x, y);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (x, y) REFERENCES dropfkeytest1(x, y);
SELECT create_distributed_table ('dropfkeytest1', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
ALTER TABLE dropfkeytest2 DROP COLUMN y CASCADE;
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
-- make sure that even if a column that is in a multi-column fkey is dropped
-- Citus can see an up-to-date fkey graph
CREATE TABLE dropfkeytest1 (x int, y int);
CREATE UNIQUE INDEX indd ON dropfkeytest1(x, y);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (x, y) REFERENCES dropfkeytest1(x, y);
SELECT create_distributed_table ('dropfkeytest1', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
ALTER TABLE dropfkeytest1 DROP COLUMN y CASCADE;
NOTICE: drop cascades to constraint fkey1 on table dropfkeytest2
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
-- make sure that even if an index which fkey relies on is dropped
-- Citus can see an up-to-date fkey graph
-- also irrelevant index drops don't affect this
CREATE TABLE dropfkeytest1 (x int);
CREATE UNIQUE INDEX i1 ON dropfkeytest1(x);
CREATE UNIQUE INDEX unrelated_idx ON dropfkeytest1(x);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
SELECT create_distributed_table ('dropfkeytest1', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
DROP INDEX unrelated_idx CASCADE;
-- should still error out since we didn't drop the index that the foreign key depends on
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
DROP INDEX i1 CASCADE;
NOTICE: drop cascades to constraint fkey1 on table dropfkeytest2
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
-- make sure that even if a uniqueness constraint which fkey depends on is dropped
-- Citus can see an up-to-date fkey graph
CREATE TABLE dropfkeytest1 (x int unique);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
SELECT create_distributed_table ('dropfkeytest1', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
ALTER TABLE dropfkeytest1 DROP CONSTRAINT dropfkeytest1_x_key CASCADE;
NOTICE: drop cascades to constraint fkey1 on table dropfkeytest2
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
-- make sure that even if a primary key which fkey depends on is dropped
-- Citus can see an up-to-date fkey graph
CREATE TABLE dropfkeytest1 (x int primary key);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
SELECT create_distributed_table ('dropfkeytest1', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
ALTER TABLE dropfkeytest1 DROP CONSTRAINT dropfkeytest1_pkey CASCADE;
NOTICE: drop cascades to constraint fkey1 on table dropfkeytest2
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
-- make sure that even if a schema which fkey depends on is dropped
-- Citus can see an up-to-date fkey graph
CREATE SCHEMA fkeytestsc;
CREATE TABLE fkeytestsc.dropfkeytest1 (x int unique);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES fkeytestsc.dropfkeytest1(x);
SELECT create_distributed_table ('fkeytestsc.dropfkeytest1', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
DROP SCHEMA fkeytestsc CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table fkeytestsc.dropfkeytest1
drop cascades to constraint fkey1 on table dropfkeytest2
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE dropfkeytest2 CASCADE;
-- make sure that even if a table which fkey depends on is dropped
-- Citus can see an up-to-date fkey graph
CREATE TABLE dropfkeytest1 (x int unique);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
SELECT create_distributed_table ('dropfkeytest1', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
DROP TABLE dropfkeytest1 CASCADE;
NOTICE: drop cascades to constraint fkey1 on table dropfkeytest2
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- we no longer need those tables
DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table;
DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2;
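
Every case above repeats the same cycle: create a foreign key, distribute the referenced table, watch create_distributed_table fail the colocation check, drop the object the key depends on, and retry. A condensed sketch of that cycle (table and constraint names here are illustrative, not from the test):

CREATE TABLE sketch_parent (x int UNIQUE);
CREATE TABLE sketch_child (x int8);
ALTER TABLE sketch_child ADD CONSTRAINT sketch_fk FOREIGN KEY (x) REFERENCES sketch_parent(x);
SELECT create_distributed_table('sketch_parent', 'x');
-- errors: sketch_child still references sketch_parent without colocation
SELECT create_distributed_table('sketch_child', 'x', colocate_with := 'none');
ALTER TABLE sketch_child DROP CONSTRAINT sketch_fk;
-- succeeds only if Citus re-reads the fkey graph after the drop
SELECT create_distributed_table('sketch_child', 'x', colocate_with := 'none');
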

@ -21,6 +21,27 @@ CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLE
    master_run_on_worker(ARRAY[hostname], ARRAY[port],
                         ARRAY['SELECT pg_reload_conf()'], false);
$$;
CREATE OR REPLACE FUNCTION trigger_metadata_sync()
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus';
CREATE OR REPLACE FUNCTION raise_error_in_metadata_sync()
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus';
CREATE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$
declare
    counter integer := -1;
begin
    while counter != target_count loop
        -- pg_stat_activity is cached at xact level and there is no easy way to clear it.
        -- Look it up in a new connection to get latest updates.
        SELECT result::int into counter FROM
            master_run_on_worker(ARRAY['localhost'], ARRAY[57636], ARRAY[
                'SELECT count(*) FROM pg_stat_activity WHERE application_name = ' || quote_literal(appname) || ';'], false);
        PERFORM pg_sleep(0.1);
    end loop;
end$$ LANGUAGE plpgsql;
-- add a node to the cluster
SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;
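
wait_until_process_count works around pg_stat_activity's transaction-level caching by polling through a fresh connection via master_run_on_worker. The same call can be used ad hoc; a sketch against the coordinator port this test uses (57636):

SELECT *
FROM master_run_on_worker(ARRAY['localhost'], ARRAY[57636],
                          ARRAY['SELECT count(*) FROM pg_stat_activity'], false);
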

@ -152,6 +173,142 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node;
2 | t | f
(1 row)

-- verify that metadata sync daemon has started
SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Metadata Sync Daemon';
count
---------------------------------------------------------------------
1
(1 row)

--
-- terminate maintenance daemon, and verify that we don't spawn multiple
-- metadata sync daemons
--
SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
pg_terminate_backend
---------------------------------------------------------------------
t
(1 row)

CALL wait_until_process_count('Citus Maintenance Daemon', 1);
select trigger_metadata_sync();
trigger_metadata_sync
---------------------------------------------------------------------

(1 row)

select wait_until_metadata_sync();
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)

SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Metadata Sync Daemon';
count
---------------------------------------------------------------------
1
(1 row)

--
-- cancel metadata sync daemon, and verify that it exits and restarts.
--
select pid as pid_before_cancel from pg_stat_activity where application_name like 'Citus Met%' \gset
select pg_cancel_backend(pid) from pg_stat_activity where application_name = 'Citus Metadata Sync Daemon';
pg_cancel_backend
---------------------------------------------------------------------
t
(1 row)

select wait_until_metadata_sync();
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)

select pid as pid_after_cancel from pg_stat_activity where application_name like 'Citus Met%' \gset
select :pid_before_cancel != :pid_after_cancel AS metadata_sync_restarted;
metadata_sync_restarted
---------------------------------------------------------------------
t
(1 row)

--
-- cancel metadata sync daemon so it exits and restarts, but at the
-- same time tell maintenanced to trigger a new metadata sync. One
-- of these should exit to avoid multiple metadata syncs.
--
select pg_cancel_backend(pid) from pg_stat_activity where application_name = 'Citus Metadata Sync Daemon';
pg_cancel_backend
---------------------------------------------------------------------
t
(1 row)

select trigger_metadata_sync();
trigger_metadata_sync
---------------------------------------------------------------------

(1 row)

select wait_until_metadata_sync();
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)

-- we assume citus.metadata_sync_retry_interval is 500ms. Change the amount we sleep to ceiling + 0.2 if it changes.
select pg_sleep(1.2);
pg_sleep
---------------------------------------------------------------------

(1 row)

SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Metadata Sync Daemon';
count
---------------------------------------------------------------------
1
(1 row)

--
-- error in metadata sync daemon, and verify it exits and restarts.
--
select pid as pid_before_error from pg_stat_activity where application_name like 'Citus Met%' \gset
select raise_error_in_metadata_sync();
raise_error_in_metadata_sync
---------------------------------------------------------------------

(1 row)

select wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)

select pid as pid_after_error from pg_stat_activity where application_name like 'Citus Met%' \gset
select :pid_before_error != :pid_after_error AS metadata_sync_restarted;
metadata_sync_restarted
---------------------------------------------------------------------
t
(1 row)

SELECT trigger_metadata_sync();
trigger_metadata_sync
---------------------------------------------------------------------

(1 row)

SELECT wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)

SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Metadata Sync Daemon';
count
---------------------------------------------------------------------
1
(1 row)

-- update it back to :worker_1_port, now metadata should be synced
SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port);
?column?

@ -594,6 +751,59 @@ SELECT verify_metadata('localhost', :worker_1_port);
t
(1 row)

-- verify that metadata sync daemon exits
call wait_until_process_count('Citus Metadata Sync Daemon', 0);
-- verify that DROP DATABASE terminates metadata sync
SELECT current_database() datname \gset
CREATE DATABASE db_to_drop;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
SELECT run_command_on_workers('CREATE DATABASE db_to_drop');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE DATABASE")
(localhost,57638,t,"CREATE DATABASE")
(2 rows)

\c db_to_drop - - :worker_1_port
CREATE EXTENSION citus;
\c db_to_drop - - :master_port
CREATE EXTENSION citus;
SELECT master_add_node('localhost', :worker_1_port);
master_add_node
---------------------------------------------------------------------
1
(1 row)

UPDATE pg_dist_node SET hasmetadata = true;
SELECT master_update_node(nodeid, 'localhost', 12345) FROM pg_dist_node;
master_update_node
---------------------------------------------------------------------

(1 row)

CREATE OR REPLACE FUNCTION trigger_metadata_sync()
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus';
SELECT trigger_metadata_sync();
trigger_metadata_sync
---------------------------------------------------------------------

(1 row)

\c :datname - - :master_port
SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%';
datname
---------------------------------------------------------------------
db_to_drop
(1 row)

DROP DATABASE db_to_drop;
SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%';
datname
---------------------------------------------------------------------
(0 rows)

-- cleanup
DROP TABLE ref_table;
TRUNCATE pg_dist_colocation;
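
Since the metadata sync daemon is a per-database background worker, a quick way to confirm none survives its database being dropped is the same pg_stat_activity filter the test uses (a sketch):

SELECT datname, pid, application_name
FROM pg_stat_activity
WHERE application_name LIKE 'Citus Met%';
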

@ -26,7 +26,7 @@ WITH dist_node_summary AS (
    ARRAY[dist_node_summary.query, dist_node_summary.query],
    false)
), dist_placement_summary AS (
    SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement)' AS query
    SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement' AS query
), dist_placement_check AS (
    SELECT count(distinct result) = 1 AS matches
    FROM dist_placement_summary CROSS JOIN LATERAL

@ -493,7 +493,7 @@ BEGIN;
(2 rows)

ROLLBACK;
-- INSERT SELECT with RETURNING/ON CONFLICT clauses should honor shared_pool_size
-- INSERT SELECT with RETURNING/ON CONFLICT clauses does not honor shared_pool_size
-- in underlying COPY commands
BEGIN;
SELECT pg_sleep(0.1);

@ -502,7 +502,9 @@ BEGIN;

(1 row)

INSERT INTO test SELECT i FROM generate_series(0,10) i RETURNING *;
-- make sure that we hit at least 4 shards per node, where 20 rows
-- is enough
INSERT INTO test SELECT i FROM generate_series(0,20) i RETURNING *;
a
---------------------------------------------------------------------
0

@ -516,10 +518,20 @@ BEGIN;
8
9
10
(11 rows)
11
12
13
14
15
16
17
18
19
20
(21 rows)

SELECT
    connection_count_to_node
    connection_count_to_node > current_setting('citus.max_shared_pool_size')::int
FROM
    citus_remote_connection_stats()
WHERE

@ -527,10 +539,10 @@ BEGIN;
    database_name = 'regression'
ORDER BY
    hostname, port;
connection_count_to_node
?column?
---------------------------------------------------------------------
3
3
t
t
(2 rows)

ROLLBACK;
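
The comparison in the updated output above can also be run standalone to see how close each node is to the shared pool cap; a sketch using the same citus_remote_connection_stats() function the test queries:

SELECT hostname, port, connection_count_to_node,
       current_setting('citus.max_shared_pool_size')::int AS pool_cap
FROM citus_remote_connection_stats()
WHERE database_name = current_database()
ORDER BY hostname, port;
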

@ -119,16 +119,6 @@ EXECUTE subquery_prepare_without_param;
(5,4)
(5 rows)

EXECUTE subquery_prepare_without_param;
values_of_subquery
---------------------------------------------------------------------
(6,4)
(6,3)
(6,2)
(6,1)
(5,4)
(5 rows)

EXECUTE subquery_prepare_param_on_partkey(1);
DEBUG: push down of limit count: 5
DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy AS values_of_subquery FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND ((users_table.user_id OPERATOR(pg_catalog.=) 1) OR (users_table.user_id OPERATOR(pg_catalog.=) 2)) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) ORDER BY ROW(users_table.user_id, events_table.event_type)::subquery_prepared_statements.xy DESC LIMIT 5
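
Prepared-statement tests in this suite repeat EXECUTE six or more times because PostgreSQL's plan cache only considers switching to a generic plan after the first five executions, so the sixth run is the first that can take the generic-plan path. A minimal sketch of the mechanism (standard PostgreSQL behavior, with an illustrative statement name):

PREPARE sketch_stmt (int) AS SELECT count(*) FROM pg_class WHERE relpages > $1;
EXECUTE sketch_stmt(0);  -- executions 1-5 use custom plans
EXECUTE sketch_stmt(0);
EXECUTE sketch_stmt(0);
EXECUTE sketch_stmt(0);
EXECUTE sketch_stmt(0);
EXECUTE sketch_stmt(0);  -- from the sixth execution on, a generic plan may be chosen
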

@ -80,3 +80,4 @@ test: isolation_insert_select_vs_all_on_mx
test: isolation_ref_select_for_update_vs_all_on_mx
test: isolation_ref_update_delete_upsert_vs_all_on_mx
test: isolation_dis2ref_foreign_keys_on_mx
test: isolation_metadata_sync_deadlock

@ -96,7 +96,9 @@ test: multi_explain hyperscale_tutorial partitioned_intermediate_results distrib
test: multi_basic_queries cross_join multi_complex_expressions multi_subquery multi_subquery_complex_queries multi_subquery_behavioral_analytics
test: multi_subquery_complex_reference_clause multi_subquery_window_functions multi_view multi_sql_function multi_prepare_sql
test: sql_procedure multi_function_in_join row_types materialized_view undistribute_table
test: multi_subquery_in_where_reference_clause join geqo adaptive_executor propagate_set_commands
test: multi_subquery_in_where_reference_clause adaptive_executor propagate_set_commands geqo
# this should be run alone as it gets too many clients
test: join_pushdown
test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message
test: multi_agg_distinct multi_agg_approximate_distinct multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction
test: multi_reference_table multi_select_for_update relation_access_tracking pg13_with_ties

@ -108,7 +110,7 @@ test: ch_bench_subquery_repartition
test: multi_agg_type_conversion multi_count_type_conversion
test: multi_partition_pruning single_hash_repartition_join
test: multi_join_pruning multi_hash_pruning intermediate_result_pruning
test: multi_null_minmax_value_pruning
test: multi_null_minmax_value_pruning cursors
test: multi_query_directory_cleanup
test: multi_task_assignment_policy multi_cross_shard
test: multi_utility_statements

@ -418,6 +418,15 @@ push(@pgOptions, "max_parallel_workers_per_gather=0");
# Allow CREATE SUBSCRIPTION to work
push(@pgOptions, "wal_level='logical'");

# Faster logical replication status update so tests with logical replication
# run faster
push(@pgOptions, "wal_receiver_status_interval=1");

# Faster logical replication apply worker launch so tests with logical
# replication run faster. This is used in ApplyLauncherMain in
# src/backend/replication/logical/launcher.c.
push(@pgOptions, "wal_retrieve_retry_interval=1000");

# Citus options set for the tests
push(@pgOptions, "citus.shard_count=4");
push(@pgOptions, "citus.shard_max_size=1500kB");
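
To verify these options are live on a test node, the settings can be inspected from any psql session; a sketch, with expected values following the Perl above:

SHOW wal_level;                      -- logical
SHOW wal_receiver_status_interval;   -- 1s
SHOW wal_retrieve_retry_interval;    -- 1s
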

@ -0,0 +1,153 @@
#include "isolation_mx_common.include.spec"

setup
{
    CREATE OR REPLACE FUNCTION trigger_metadata_sync()
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus';

    CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus';

    CREATE TABLE deadlock_detection_test (user_id int UNIQUE, some_val int);
    INSERT INTO deadlock_detection_test SELECT i, i FROM generate_series(1,7) i;
    SELECT create_distributed_table('deadlock_detection_test', 'user_id');

    CREATE TABLE t2(a int);
    SELECT create_distributed_table('t2', 'a');
}

teardown
{
    DROP FUNCTION trigger_metadata_sync();
    DROP TABLE deadlock_detection_test;
    DROP TABLE t2;
    SET citus.shard_replication_factor = 1;
    SELECT citus_internal.restore_isolation_tester_func();
}

session "s1"

step "increase-retry-interval"
{
    ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 20000;
}

step "reset-retry-interval"
{
    ALTER SYSTEM RESET citus.metadata_sync_retry_interval;
}

step "enable-deadlock-detection"
{
    ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO 1.1;
}

step "disable-deadlock-detection"
{
    ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1;
}

step "reload-conf"
{
    SELECT pg_reload_conf();
}

step "s1-begin"
{
    BEGIN;
}

step "s1-update-1"
{
    UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1;
}

step "s1-update-2"
{
    UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2;
}

step "s1-commit"
{
    COMMIT;
}

step "s1-count-daemons"
{
    SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%';
}

step "s1-cancel-metadata-sync"
{
    SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%';
    SELECT pg_sleep(2);
}

session "s2"

step "s2-start-session-level-connection"
{
    SELECT start_session_level_connection_to_node('localhost', 57638);
}

step "s2-stop-connection"
{
    SELECT stop_session_level_connection_to_node();
}

step "s2-begin-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
}

step "s2-update-1-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1');
}

step "s2-update-2-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2');
}

step "s2-truncate-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('TRUNCATE t2');
}

step "s2-commit-on-worker"
{
    SELECT run_commands_on_session_level_connection_to_node('COMMIT');
}

session "s3"

step "s3-invalidate-metadata"
{
    update pg_dist_node SET metadatasynced = false;
}

step "s3-resync"
{
    SELECT trigger_metadata_sync();
}

step "s3-wait"
{
    SELECT pg_sleep(2);
}

// Backends can block metadata sync. The following test verifies that if this happens,
// we still do distributed deadlock detection. In the following, s2-truncate-on-worker
// causes the concurrent metadata sync to be blocked, while s1 and s2 are themselves
// involved in a distributed deadlock.
// See https://github.com/citusdata/citus/issues/4393 for more details.
permutation "enable-deadlock-detection" "reload-conf" "s2-start-session-level-connection" "s1-begin" "s1-update-1" "s2-begin-on-worker" "s2-update-2-on-worker" "s2-truncate-on-worker" "s3-invalidate-metadata" "s3-resync" "s3-wait" "s2-update-1-on-worker" "s1-update-2" "s1-commit" "s2-commit-on-worker" "disable-deadlock-detection" "reload-conf" "s2-stop-connection"

// Test that when metadata sync is waiting for locks, cancelling it terminates it.
// This is important in cases where the metadata sync daemon itself is involved in a deadlock.
permutation "increase-retry-interval" "reload-conf" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate-on-worker" "s3-invalidate-metadata" "s3-resync" "s3-wait" "s1-count-daemons" "s1-cancel-metadata-sync" "s1-count-daemons" "reset-retry-interval" "reload-conf" "s2-commit-on-worker" "s2-stop-connection" "s3-resync" "s3-wait"

@ -0,0 +1,112 @@
CREATE SCHEMA cursors;
SET search_path TO cursors;

CREATE TABLE distributed_table (key int, value text);
SELECT create_distributed_table('distributed_table', 'key');

-- load a non-trivial amount of data, because RETURN QUERY in plpgsql
-- hard-codes the cursor fetch to 50 rows on PG 12, though that limit
-- might increase at some point in the future, so be mindful
INSERT INTO distributed_table SELECT i % 10, i::text FROM generate_series(0, 1000) i;

CREATE OR REPLACE FUNCTION simple_cursor_on_dist_table(cursor_name refcursor) RETURNS refcursor AS '
BEGIN
    OPEN $1 FOR SELECT DISTINCT key FROM distributed_table ORDER BY 1;
    RETURN $1;
END;
' LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION cursor_with_intermediate_result_on_dist_table(cursor_name refcursor) RETURNS refcursor AS '
BEGIN
    OPEN $1 FOR
        WITH cte_1 AS (SELECT * FROM distributed_table OFFSET 0)
        SELECT DISTINCT key FROM distributed_table WHERE value in (SELECT value FROM cte_1) ORDER BY 1;
    RETURN $1;
END;
' LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION cursor_with_intermediate_result_on_dist_table_with_param(cursor_name refcursor, filter text) RETURNS refcursor AS '
BEGIN
    OPEN $1 FOR
        WITH cte_1 AS (SELECT * FROM distributed_table WHERE value < $2 OFFSET 0)
        SELECT DISTINCT key FROM distributed_table WHERE value in (SELECT value FROM cte_1) ORDER BY 1;
    RETURN $1;
END;
' LANGUAGE plpgsql;

-- pretty basic query with cursors
-- Citus should plan/execute once, pull
-- the results to the coordinator, then serve them
-- from the coordinator
BEGIN;
SELECT simple_cursor_on_dist_table('cursor_1');
SET LOCAL citus.log_intermediate_results TO ON;
SET LOCAL client_min_messages TO DEBUG1;
FETCH 5 IN cursor_1;
FETCH 50 IN cursor_1;
FETCH ALL IN cursor_1;
COMMIT;

BEGIN;
SELECT cursor_with_intermediate_result_on_dist_table('cursor_1');

-- multiple FETCH commands should not trigger re-running the subplans
SET LOCAL citus.log_intermediate_results TO ON;
SET LOCAL client_min_messages TO DEBUG1;
FETCH 5 IN cursor_1;
FETCH 1 IN cursor_1;
FETCH ALL IN cursor_1;
FETCH 5 IN cursor_1;
COMMIT;

BEGIN;
SELECT cursor_with_intermediate_result_on_dist_table_with_param('cursor_1', '600');

-- multiple FETCH commands should not trigger re-running the subplans
-- also test with parameters
SET LOCAL citus.log_intermediate_results TO ON;
SET LOCAL client_min_messages TO DEBUG1;
FETCH 1 IN cursor_1;
FETCH 1 IN cursor_1;
FETCH 1 IN cursor_1;
FETCH 1 IN cursor_1;
FETCH 1 IN cursor_1;
FETCH 1 IN cursor_1;
FETCH ALL IN cursor_1;

COMMIT;

CREATE OR REPLACE FUNCTION value_counter() RETURNS TABLE(counter text) LANGUAGE PLPGSQL AS $function$
BEGIN
    return query
        WITH cte AS
            (SELECT dt.value
             FROM distributed_table dt
             WHERE dt.value in
                 (SELECT value
                  FROM distributed_table p
                  GROUP BY p.value
                  HAVING count(*) > 0))

        SELECT * FROM cte;
END;
$function$ ;

SET citus.log_intermediate_results TO ON;
SET client_min_messages TO DEBUG1;
\set VERBOSITY terse
SELECT count(*) from (SELECT value_counter()) as foo;
BEGIN;
SELECT count(*) from (SELECT value_counter()) as foo;
COMMIT;

-- suppress NOTICEs
SET client_min_messages TO ERROR;
DROP SCHEMA cursors CASCADE;

@ -100,7 +100,7 @@ SELECT * FROM test WHERE x = 1;

-- add the follower as a secondary node and try again, the SELECT statement
-- should work this time
\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
SET search_path TO single_node;

SELECT 1 FROM master_add_node('localhost', :follower_master_port, groupid => 0, noderole => 'secondary');

@ -138,7 +138,7 @@ SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
RESET citus.task_assignment_policy;

-- Cleanup
\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
SET search_path TO single_node;
SET client_min_messages TO WARNING;
DROP SCHEMA single_node CASCADE;

@ -151,7 +151,7 @@ SELECT * FROM reference_table ORDER BY a;
INSERT INTO citus_local_table (a, b, z) VALUES (1, 2, 3);
SELECT * FROM citus_local_table ORDER BY a;

\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
DROP TABLE the_table;
DROP TABLE reference_table;
DROP TABLE citus_local_table;

@ -54,7 +54,7 @@ SELECT * FROM the_table;

-- add the secondary nodes and try again, the SELECT statement should work this time

\c - - - :master_port
\c -reuse-previous=off regression - - :master_port

SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port,
    groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_1_port),

@ -106,12 +106,12 @@ order by s_i_id;
-- now move the secondary nodes into the new cluster and see that the follower, finally
-- correctly configured, can run select queries involving them

\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
UPDATE pg_dist_node SET nodecluster = 'second-cluster' WHERE noderole = 'secondary';
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'"
SELECT * FROM the_table;

-- clean up after ourselves
\c - - - :master_port
\c -reuse-previous=off regression - - :master_port
DROP TABLE the_table;
DROP TABLE stock;
@ -563,5 +563,138 @@ CREATE TABLE self_referencing_reference_table(
|
|||
SELECT create_reference_table('self_referencing_reference_table');
|
||||
ALTER TABLE self_referencing_reference_table ADD CONSTRAINT fk FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column);
|
||||
|
||||
-- make sure that if fkey is dropped
|
||||
-- Citus can see up-to date fkey graph
|
||||
CREATE TABLE dropfkeytest1 (x int unique);
|
||||
CREATE TABLE dropfkeytest2 (x int8);
|
||||
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (x) REFERENCES dropfkeytest1(x);
|
||||
SELECT create_distributed_table ('dropfkeytest1', 'x');
|
||||
-- this should error out
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||
ALTER TABLE dropfkeytest2 DROP CONSTRAINT fkey1;
|
||||
-- this should work
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||
|
||||
DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
|
||||
|
||||
-- make sure that if a column that is in a fkey is dropped
|
||||
-- Citus can see up-to date fkey graph
|
||||
CREATE TABLE dropfkeytest1 (x int unique);
|
||||
CREATE TABLE dropfkeytest2 (x int8, y int8);
|
||||
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
|
||||
SELECT create_distributed_table ('dropfkeytest1', 'x');
|
||||
-- this should error out
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
|
||||
ALTER TABLE dropfkeytest2 DROP COLUMN y;
|
||||
-- this should work
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||
|
||||
DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
|
||||
|
||||
-- make sure that even if a column that is in a multi-column index is dropped
|
||||
-- Citus can see up-to date fkey graph
|
||||
CREATE TABLE dropfkeytest1 (x int, y int);
|
||||
CREATE UNIQUE INDEX indd ON dropfkeytest1(x, y);
|
||||
CREATE TABLE dropfkeytest2 (x int8, y int8);
|
||||
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (x, y) REFERENCES dropfkeytest1(x, y);
|
||||
SELECT create_distributed_table ('dropfkeytest1', 'x');
|
||||
-- this should error out
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
|
||||
ALTER TABLE dropfkeytest2 DROP COLUMN y CASCADE;
|
||||
-- this should work
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||
|
||||
DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
|
||||
|
||||
-- make sure that even if a column that is in a multi-column fkey is dropped
|
||||
-- Citus can see up-to date fkey graph
|
||||
CREATE TABLE dropfkeytest1 (x int, y int);
|
||||
CREATE UNIQUE INDEX indd ON dropfkeytest1(x, y);
|
||||
CREATE TABLE dropfkeytest2 (x int8, y int8);
|
||||
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (x, y) REFERENCES dropfkeytest1(x, y);
|
||||
SELECT create_distributed_table ('dropfkeytest1', 'x');
|
||||
-- this should error out
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
|
||||
ALTER TABLE dropfkeytest1 DROP COLUMN y CASCADE;
|
||||
-- this should work
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||
|
||||
DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
|
||||
|
||||
-- make sure that even if an index which fkey relies on is dropped
|
||||
-- Citus can see up-to date fkey graph
|
||||
-- also irrelevant index drops doesn't affect this
|
||||
CREATE TABLE dropfkeytest1 (x int);
|
||||
CREATE UNIQUE INDEX i1 ON dropfkeytest1(x);
|
||||
CREATE UNIQUE INDEX unrelated_idx ON dropfkeytest1(x);
|
||||
CREATE TABLE dropfkeytest2 (x int8, y int8);
|
||||
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
|
||||
SELECT create_distributed_table ('dropfkeytest1', 'x');
|
||||
-- this should error out
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
|
||||
DROP INDEX unrelated_idx CASCADE;
|
||||
-- should still error out since we didn't drop the index that foreign key depends on
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||
DROP INDEX i1 CASCADE;
|
||||
-- this should work
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||
|
||||
DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
|
||||
|
||||
-- make sure that even if a uniqueness constraint which fkey depends on is dropped
|
||||
-- Citus can see up-to date fkey graph
|
||||
CREATE TABLE dropfkeytest1 (x int unique);
|
||||
CREATE TABLE dropfkeytest2 (x int8, y int8);
|
||||
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
|
||||
SELECT create_distributed_table ('dropfkeytest1', 'x');
|
||||
-- this should error out
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
|
||||
ALTER TABLE dropfkeytest1 DROP CONSTRAINT dropfkeytest1_x_key CASCADE;
|
||||
-- this should work
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||
|
||||
DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;
|
||||
|
||||
-- make sure that even if a primary key which fkey depends on is dropped
|
||||
-- Citus can see up-to date fkey graph
|
||||
CREATE TABLE dropfkeytest1 (x int primary key);
|
||||
CREATE TABLE dropfkeytest2 (x int8, y int8);
|
||||
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
|
||||
SELECT create_distributed_table ('dropfkeytest1', 'x');
|
||||
-- this should error out
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
|
||||
ALTER TABLE dropfkeytest1 DROP CONSTRAINT dropfkeytest1_pkey CASCADE;
|
||||
-- this should work
|
||||
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');
|
||||
|
||||
DROP TABLE dropfkeytest1, dropfkeytest2 CASCADE;

-- make sure that even if a schema which the fkey depends on is dropped,
-- Citus can see the up-to-date fkey graph
CREATE SCHEMA fkeytestsc;
CREATE TABLE fkeytestsc.dropfkeytest1 (x int unique);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES fkeytestsc.dropfkeytest1(x);
SELECT create_distributed_table ('fkeytestsc.dropfkeytest1', 'x');
-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
DROP SCHEMA fkeytestsc CASCADE;
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');

DROP TABLE dropfkeytest2 CASCADE;

-- make sure that even if a table which the fkey depends on is dropped,
-- Citus can see the up-to-date fkey graph
CREATE TABLE dropfkeytest1 (x int unique);
CREATE TABLE dropfkeytest2 (x int8, y int8);
ALTER TABLE dropfkeytest2 ADD CONSTRAINT fkey1 FOREIGN KEY (y) REFERENCES dropfkeytest1(x);
SELECT create_distributed_table ('dropfkeytest1', 'x');
-- this should error out
SELECT create_distributed_table ('dropfkeytest2', 'y', colocate_with:='none');
DROP TABLE dropfkeytest1 CASCADE;
-- this should work
SELECT create_distributed_table ('dropfkeytest2', 'x', colocate_with:='none');

-- we no longer need those tables
-DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table;
+DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2;

@@ -27,6 +27,30 @@ CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLE
    ARRAY['SELECT pg_reload_conf()'], false);
$$;

CREATE OR REPLACE FUNCTION trigger_metadata_sync()
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus';

CREATE OR REPLACE FUNCTION raise_error_in_metadata_sync()
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus';

CREATE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$
declare
    counter integer := -1;
begin
    while counter != target_count loop
        -- pg_stat_activity is cached at xact level and there is no easy way to clear it.
        -- Look it up in a new connection to get the latest updates.
        SELECT result::int INTO counter FROM
            master_run_on_worker(ARRAY['localhost'], ARRAY[57636], ARRAY[
                'SELECT count(*) FROM pg_stat_activity WHERE application_name = ' || quote_literal(appname) || ';'], false);
        PERFORM pg_sleep(0.1);
    end loop;
end$$ LANGUAGE plpgsql;
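-- Usage sketch (hedged; it mirrors the call made further down in this file):
-- block until pg_stat_activity reports exactly one maintenance daemon.
CALL wait_until_process_count('Citus Maintenance Daemon', 1);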

-- add a node to the cluster
SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;

@@ -79,6 +103,54 @@ END;
SELECT wait_until_metadata_sync(30000);
SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node;

-- verify that the metadata sync daemon has started
SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Metadata Sync Daemon';

--
-- terminate the maintenance daemon, and verify that we don't spawn multiple
-- metadata sync daemons
--
SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
CALL wait_until_process_count('Citus Maintenance Daemon', 1);
select trigger_metadata_sync();
select wait_until_metadata_sync();
SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Metadata Sync Daemon';

--
-- cancel the metadata sync daemon, and verify that it exits and restarts.
--
select pid as pid_before_cancel from pg_stat_activity where application_name like 'Citus Met%' \gset
select pg_cancel_backend(pid) from pg_stat_activity where application_name = 'Citus Metadata Sync Daemon';
select wait_until_metadata_sync();
select pid as pid_after_cancel from pg_stat_activity where application_name like 'Citus Met%' \gset
select :pid_before_cancel != :pid_after_cancel AS metadata_sync_restarted;

--
-- cancel the metadata sync daemon so that it exits and restarts, but at the
-- same time tell maintenanced to trigger a new metadata sync. One
-- of these should exit to avoid multiple metadata syncs.
--
select pg_cancel_backend(pid) from pg_stat_activity where application_name = 'Citus Metadata Sync Daemon';
select trigger_metadata_sync();
select wait_until_metadata_sync();
-- we assume citus.metadata_sync_retry_interval is 500ms. Change the amount we sleep to ceiling + 0.2 if it changes.
select pg_sleep(1.2);
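-- Hedged aside: the sleep above is tied to the GUC named in the comment;
-- its current value can be checked directly if the timing ever needs review.
SHOW citus.metadata_sync_retry_interval;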
SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Metadata Sync Daemon';

--
-- force an error in the metadata sync daemon, and verify that it exits and restarts.
--
select pid as pid_before_error from pg_stat_activity where application_name like 'Citus Met%' \gset
select raise_error_in_metadata_sync();
select wait_until_metadata_sync(30000);
select pid as pid_after_error from pg_stat_activity where application_name like 'Citus Met%' \gset
select :pid_before_error != :pid_after_error AS metadata_sync_restarted;

SELECT trigger_metadata_sync();
SELECT wait_until_metadata_sync(30000);
SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Metadata Sync Daemon';

-- update it back to :worker_1_port, now metadata should be synced
SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port);
SELECT wait_until_metadata_sync(30000);

@@ -249,6 +321,39 @@ SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port);

-- verify that the metadata sync daemon exits
call wait_until_process_count('Citus Metadata Sync Daemon', 0);

-- verify that DROP DATABASE terminates metadata sync
SELECT current_database() datname \gset
CREATE DATABASE db_to_drop;
SELECT run_command_on_workers('CREATE DATABASE db_to_drop');

\c db_to_drop - - :worker_1_port
CREATE EXTENSION citus;

\c db_to_drop - - :master_port
CREATE EXTENSION citus;
SELECT master_add_node('localhost', :worker_1_port);
UPDATE pg_dist_node SET hasmetadata = true;

SELECT master_update_node(nodeid, 'localhost', 12345) FROM pg_dist_node;

CREATE OR REPLACE FUNCTION trigger_metadata_sync()
    RETURNS void
    LANGUAGE C STRICT
    AS 'citus';

SELECT trigger_metadata_sync();

\c :datname - - :master_port

SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%';

DROP DATABASE db_to_drop;

SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%';

-- cleanup
DROP TABLE ref_table;
TRUNCATE pg_dist_colocation;

@@ -23,7 +23,7 @@ WITH dist_node_summary AS (
                          ARRAY[dist_node_summary.query, dist_node_summary.query],
                          false)
), dist_placement_summary AS (
-    SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement)' AS query
+    SELECT 'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement' AS query
), dist_placement_check AS (
    SELECT count(distinct result) = 1 AS matches
    FROM dist_placement_summary CROSS JOIN LATERAL
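-- Minimal sketch of the comparison pattern used above (the ports are
-- placeholders for this illustration): run the same catalog query on two
-- workers and check that the aggregated results agree.
SELECT count(distinct result) = 1 AS matches
FROM master_run_on_worker(
    ARRAY['localhost', 'localhost'],
    ARRAY[57637, 57638],
    ARRAY['SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement',
          'SELECT jsonb_agg(pg_dist_placement ORDER BY shardid) FROM pg_dist_placement'],
    false);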

@@ -338,14 +338,16 @@ BEGIN;
        hostname, port;
ROLLBACK;

--- INSERT SELECT with RETURNING/ON CONFLICT clauses should honor shared_pool_size
+-- INSERT SELECT with RETURNING/ON CONFLICT clauses does not honor shared_pool_size
+-- in underlying COPY commands
BEGIN;
        SELECT pg_sleep(0.1);
-       INSERT INTO test SELECT i FROM generate_series(0,10) i RETURNING *;
+       -- make sure that we hit at least 4 shards per node, where 20 rows
+       -- is enough
+       INSERT INTO test SELECT i FROM generate_series(0,20) i RETURNING *;

        SELECT
-               connection_count_to_node
+               connection_count_to_node > current_setting('citus.max_shared_pool_size')::int
        FROM
                citus_remote_connection_stats()
        WHERE
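-- Hedged aside: the ceiling compared against above is the GUC below, shown
-- here only to make the check concrete.
SHOW citus.max_shared_pool_size;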

@@ -64,7 +64,6 @@ EXECUTE subquery_prepare_without_param;
EXECUTE subquery_prepare_without_param;
EXECUTE subquery_prepare_without_param;
EXECUTE subquery_prepare_without_param;
EXECUTE subquery_prepare_without_param;

EXECUTE subquery_prepare_param_on_partkey(1);
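-- Hedged aside: prepared statements are executed repeatedly in these tests
-- because PostgreSQL considers switching to a generic plan after the first
-- five executions, so a further run exercises a different planning path.
EXECUTE subquery_prepare_param_on_partkey(1);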