Merge branch 'master' into velioglu/table_wo_seq_prototype

velioglu/wo_seq_test_1
Burak Velioglu 2022-01-04 12:53:14 +03:00
commit c60f77bb99
No known key found for this signature in database
GPG Key ID: F6827E620F6549C6
59 changed files with 1438 additions and 255 deletions


@@ -6,10 +6,20 @@ orbs:
 parameters:
   image_suffix:
     type: string
-    default: "-v2021_10_27"
+    default: '-v2021_10_27'
+  pg12_version:
+    type: string
+    default: '12.8'
+  pg13_version:
+    type: string
+    default: '13.4'
+  pg14_version:
+    type: string
+    default: '14.0'
+  upgrade_pg_versions:
+    type: string
+    default: '12.8-13.4-14.0'
 jobs:
   build:
     description: Build the citus extension
     parameters:
@@ -47,40 +57,22 @@ jobs:
           command: citus_indent --check
       - run:
           name: 'Fix whitespace'
-          command: ci/editorconfig.sh
-      - run:
-          name: 'Check if whitespace fixing changed anything, install editorconfig if it did'
-          command: git diff --exit-code
+          command: ci/editorconfig.sh && git diff --exit-code
       - run:
           name: 'Remove useless declarations'
-          command: ci/remove_useless_declarations.sh
-      - run:
-          name: 'Check if changed'
-          command: git diff --cached --exit-code
+          command: ci/remove_useless_declarations.sh && git diff --cached --exit-code
       - run:
           name: 'Normalize test output'
-          command: ci/normalize_expected.sh
-      - run:
-          name: 'Check if changed'
-          command: git diff --exit-code
+          command: ci/normalize_expected.sh && git diff --exit-code
       - run:
           name: 'Check for C-style comments in migration files'
-          command: ci/disallow_c_comments_in_migrations.sh
-      - run:
-          name: 'Check if changed'
-          command: git diff --exit-code
-      - run:
-          name: 'Check for comments that start with # character in spec files'
-          command: ci/disallow_hash_comments_in_spec_files.sh
-      - run:
-          name: 'Check if changed'
-          command: git diff --exit-code
+          command: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code
+      - run:
+          name: 'Check for comment--cached ns that start with # character in spec files'
+          command: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code
       - run:
           name: 'Check for gitignore entries .for source files'
-          command: ci/fix_gitignore.sh
-      - run:
-          name: 'Check if changed'
-          command: git diff --exit-code
+          command: ci/fix_gitignore.sh && git diff --exit-code
       - run:
           name: 'Check for lengths of changelog entries'
           command: ci/disallow_long_changelog_entries.sh
@@ -180,11 +172,9 @@ jobs:
       - store_artifacts:
           name: 'Save regressions'
           path: src/test/regress/regression.diffs
-          when: on_fail
       - store_artifacts:
           name: 'Save core dumps'
           path: /tmp/core_dumps
-          when: on_fail
       - store_artifacts:
           name: 'Save pg_upgrade logs for newData dir'
           path: /tmp/pg_upgrade_newData_logs
@@ -267,17 +257,16 @@ jobs:
           name: 'Save core dumps'
           path: /tmp/core_dumps
       - store_artifacts:
-          name: "Save logfiles"
+          name: 'Save logfiles'
           path: src/test/regress/tmp_citus_test/logfiles
       - codecov/upload:
           flags: 'test_<< parameters.pg_major >>,upgrade'
   test-citus-upgrade:
     description: Runs citus upgrade tests
     parameters:
       pg_major:
-        description: "postgres major version"
+        description: 'postgres major version'
         type: integer
       image:
         description: 'docker image to use as for the tests'
@@ -348,11 +337,9 @@ jobs:
       - store_artifacts:
           name: 'Save regressions'
           path: src/test/regress/regression.diffs
-          when: on_fail
       - store_artifacts:
           name: 'Save core dumps'
          path: /tmp/core_dumps
-          when: on_fail
       - codecov/upload:
           flags: 'test_<< parameters.pg_major >>,upgrade'
@@ -360,7 +347,7 @@ jobs:
     description: Runs the common tests of citus
     parameters:
       pg_major:
-        description: "postgres major version"
+        description: 'postgres major version'
         type: integer
       image:
         description: 'docker image to use as for the tests'
@@ -370,7 +357,7 @@ jobs:
         description: 'docker image tag to use'
         type: string
       make:
-        description: "make target"
+        description: 'make target'
         type: string
     docker:
       - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
@@ -416,18 +403,15 @@ jobs:
       - store_artifacts:
           name: 'Save regressions'
           path: src/test/regress/regression.diffs
-          when: on_fail
       - store_artifacts:
           name: 'Save mitmproxy output (failure test specific)'
           path: src/test/regress/proxy.output
       - store_artifacts:
           name: 'Save results'
           path: src/test/regress/results/
-          when: on_fail
       - store_artifacts:
           name: 'Save core dumps'
           path: /tmp/core_dumps
-          when: on_fail
       - codecov/upload:
           flags: 'test_<< parameters.pg_major >>,<< parameters.make >>'
           when: always
@@ -436,7 +420,7 @@ jobs:
     description: Runs tap tests for citus
     parameters:
       pg_major:
-        description: "postgres major version"
+        description: 'postgres major version'
         type: integer
       image:
         description: 'docker image to use as for the tests'
@@ -449,7 +433,7 @@ jobs:
         description: 'name of the tap test suite to run'
         type: string
       make:
-        description: "make target"
+        description: 'make target'
         type: string
         default: installcheck
     docker:
@@ -488,18 +472,16 @@ jobs:
       - store_artifacts:
           name: 'Save tap logs'
           path: /home/circleci/project/src/test/recovery/tmp_check/log
-          when: on_fail
       - store_artifacts:
           name: 'Save core dumps'
           path: /tmp/core_dumps
-          when: on_fail
       - codecov/upload:
           flags: 'test_<< parameters.pg_major >>,tap_<< parameters.suite >>_<< parameters.make >>'
           when: always
   check-merge-to-enterprise:
     docker:
-      - image: citus/extbuilder:13.4
+      - image: citus/extbuilder:<< pipeline.parameters.pg13_version >>
     working_directory: /home/circleci/project
     steps:
       - checkout
@@ -541,7 +523,6 @@ workflows:
   version: 2
   build_and_test:
     jobs:
       - check-merge-to-enterprise:
           filters:
             branches:
@@ -551,15 +532,15 @@ workflows:
       - build:
           name: build-12
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
       - build:
           name: build-13
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
       - build:
           name: build-14
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
       - check-style
       - check-sql-snapshots
@@ -567,266 +548,266 @@ workflows:
       - test-citus:
           name: 'test-12_check-multi'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-multi
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-multi-1'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-multi-1
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-mx'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-multi-mx
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-vanilla'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-vanilla
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-isolation'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-isolation
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-worker'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-worker
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-operations'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-operations
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-follower-cluster'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-follower-cluster
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-columnar'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-columnar
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-columnar-isolation'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-columnar-isolation
           requires: [build-12]
       - tap-test-citus:
           name: 'test_12_tap-recovery'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           suite: recovery
           requires: [build-12]
       - test-citus:
           name: 'test-12_check-failure'
           pg_major: 12
           image: citus/failtester
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           make: check-failure
           requires: [build-12]
       - test-citus:
           name: 'test-13_check-multi'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-multi
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-multi-1'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-multi-1
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-mx'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-multi-mx
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-vanilla'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-vanilla
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-isolation'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-isolation
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-worker'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-worker
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-operations'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-operations
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-follower-cluster'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-follower-cluster
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-columnar'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-columnar
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-columnar-isolation'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-columnar-isolation
           requires: [build-13]
       - tap-test-citus:
           name: 'test_13_tap-recovery'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           suite: recovery
           requires: [build-13]
       - test-citus:
           name: 'test-13_check-failure'
           pg_major: 13
           image: citus/failtester
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-failure
           requires: [build-13]
       - test-citus:
           name: 'test-14_check-multi'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-multi
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-multi-1'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-multi-1
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-mx'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-multi-mx
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-vanilla'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-vanilla
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-isolation'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-isolation
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-worker'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-worker
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-operations'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-operations
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-follower-cluster'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-follower-cluster
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-columnar'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-columnar
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-columnar-isolation'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-columnar-isolation
           requires: [build-14]
       - tap-test-citus:
           name: 'test_14_tap-recovery'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           suite: recovery
           requires: [build-14]
       - test-citus:
           name: 'test-14_check-failure'
           pg_major: 14
           image: citus/failtester
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           make: check-failure
           requires: [build-14]
       - test-arbitrary-configs:
           name: 'test-12_check-arbitrary-configs'
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           requires: [build-12]
       - test-arbitrary-configs:
           name: 'test-13_check-arbitrary-configs'
           pg_major: 13
-          image_tag: '13.4'
+          image_tag: '<< pipeline.parameters.pg13_version >>'
           requires: [build-13]
       - test-arbitrary-configs:
           name: 'test-14_check-arbitrary-configs'
           pg_major: 14
-          image_tag: '14.0'
+          image_tag: '<< pipeline.parameters.pg14_version >>'
           requires: [build-14]
       - test-pg-upgrade:
           name: 'test-12-13_check-pg-upgrade'
           old_pg_major: 12
           new_pg_major: 13
-          image_tag: '12.8-13.4-14.0'
+          image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
-          requires: [build-12,build-13]
+          requires: [build-12, build-13]
       - test-pg-upgrade:
           name: 'test-12-14_check-pg-upgrade'
           old_pg_major: 12
           new_pg_major: 14
-          image_tag: '12.8-13.4-14.0'
+          image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
-          requires: [build-12,build-14]
+          requires: [build-12, build-14]
       - test-pg-upgrade:
           name: 'test-13-14_check-pg-upgrade'
           old_pg_major: 13
           new_pg_major: 14
-          image_tag: '12.8-13.4-14.0'
+          image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
-          requires: [build-13,build-14]
+          requires: [build-13, build-14]
       - test-citus-upgrade:
           name: test-12_check-citus-upgrade
           pg_major: 12
-          image_tag: '12.8'
+          image_tag: '<< pipeline.parameters.pg12_version >>'
           requires: [build-12]
       - ch_benchmark:


@@ -3,6 +3,7 @@
 [![Slack Status](http://slack.citusdata.com/badge.svg)](https://slack.citusdata.com)
 [![Latest Docs](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://docs.citusdata.com/)
 [![Code Coverage](https://codecov.io/gh/citusdata/citus/branch/master/graph/badge.svg)](https://app.codecov.io/gh/citusdata/citus)
+[![Stack Overflow](https://img.shields.io/badge/Stack%20Overflow-%20-545353?logo=Stack%20Overflow)](https://stackoverflow.com/questions/tagged/citus)
 ## What is Citus?


@@ -2660,7 +2660,7 @@ upgrade_columnar_storage(PG_FUNCTION_ARGS)
 	 * ACCESS EXCLUSIVE LOCK is not required by the low-level routines, so we
 	 * can take only an ACCESS SHARE LOCK. But all access to non-current
 	 * columnar tables will fail anyway, so it's better to take ACCESS
-	 * EXLUSIVE LOCK now.
+	 * EXCLUSIVE LOCK now.
 	 */
 	Relation rel = table_open(relid, AccessExclusiveLock);
 	if (!IsColumnarTableAmTable(relid))
@@ -2696,7 +2696,7 @@ downgrade_columnar_storage(PG_FUNCTION_ARGS)
 	 * ACCESS EXCLUSIVE LOCK is not required by the low-level routines, so we
 	 * can take only an ACCESS SHARE LOCK. But all access to non-current
 	 * columnar tables will fail anyway, so it's better to take ACCESS
-	 * EXLUSIVE LOCK now.
+	 * EXCLUSIVE LOCK now.
 	 */
 	Relation rel = table_open(relid, AccessExclusiveLock);
 	if (!IsColumnarTableAmTable(relid))


@@ -1190,7 +1190,7 @@ CreateDistributedTableLike(TableConversionState *con)
 	 * at this moment, but that's going to be the table in pg_dist_partition.
 	 */
 	Oid parentRelationId = PartitionParentOid(originalRelationId);
-	Var *parentDistKey = DistPartitionKey(parentRelationId);
+	Var *parentDistKey = DistPartitionKeyOrError(parentRelationId);
 	char *parentDistKeyColumnName =
 		ColumnToColumnName(parentRelationId, nodeToString(parentDistKey));


@@ -326,6 +326,11 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
 			return CreateExtensionDDLCommand(dependency);
 		}
+		case OCLASS_FOREIGN_SERVER:
+		{
+			return GetForeignServerCreateDDLCommand(dependency->objectId);
+		}
 		default:
 		{
 			break;


@@ -89,6 +89,14 @@ static DistributeObjectOps Any_AlterExtensionContents = {
 	.address = NULL,
 	.markDistributed = false,
 };
+static DistributeObjectOps Any_AlterForeignServer = {
+	.deparse = DeparseAlterForeignServerStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessAlterForeignServerStmt,
+	.postprocess = NULL,
+	.address = NULL,
+	.markDistributed = false,
+};
 static DistributeObjectOps Any_AlterFunction = {
 	.deparse = DeparseAlterFunctionStmt,
 	.qualify = QualifyAlterFunctionStmt,
@@ -177,6 +185,14 @@ static DistributeObjectOps Any_CreatePolicy = {
 	.address = NULL,
 	.markDistributed = false,
 };
+static DistributeObjectOps Any_CreateForeignServer = {
+	.deparse = DeparseCreateForeignServerStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessCreateForeignServerStmt,
+	.postprocess = PostprocessCreateForeignServerStmt,
+	.address = CreateForeignServerStmtObjectAddress,
+	.markDistributed = true,
+};
 static DistributeObjectOps Any_CreateStatistics = {
 	.deparse = DeparseCreateStatisticsStmt,
 	.qualify = QualifyCreateStatisticsStmt,
@@ -297,6 +313,30 @@ static DistributeObjectOps Extension_Drop = {
 	.address = NULL,
 	.markDistributed = false,
 };
+static DistributeObjectOps ForeignServer_Drop = {
+	.deparse = DeparseDropForeignServerStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessDropForeignServerStmt,
+	.postprocess = NULL,
+	.address = NULL,
+	.markDistributed = false,
+};
+static DistributeObjectOps ForeignServer_Rename = {
+	.deparse = DeparseAlterForeignServerRenameStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessRenameForeignServerStmt,
+	.postprocess = NULL,
+	.address = NULL,
+	.markDistributed = false,
+};
+static DistributeObjectOps ForeignServer_AlterOwner = {
+	.deparse = DeparseAlterForeignServerOwnerStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessAlterForeignServerOwnerStmt,
+	.postprocess = PostprocessAlterForeignServerOwnerStmt,
+	.address = AlterForeignServerOwnerStmtObjectAddress,
+	.markDistributed = false,
+};
 static DistributeObjectOps ForeignTable_AlterTable = {
 	.deparse = NULL,
 	.qualify = NULL,
@@ -675,6 +715,11 @@ GetDistributeObjectOps(Node *node)
 			return &Any_AlterFunction;
 		}
+		case T_AlterForeignServerStmt:
+		{
+			return &Any_AlterForeignServer;
+		}
 		case T_AlterObjectDependsStmt:
 		{
 			AlterObjectDependsStmt *stmt = castNode(AlterObjectDependsStmt, node);
@@ -789,6 +834,11 @@ GetDistributeObjectOps(Node *node)
 			return &Database_AlterOwner;
 		}
+		case OBJECT_FOREIGN_SERVER:
+		{
+			return &ForeignServer_AlterOwner;
+		}
 		case OBJECT_FUNCTION:
 		{
 			return &Function_AlterOwner;
@@ -915,6 +965,11 @@ GetDistributeObjectOps(Node *node)
 			return &Any_CreateFunction;
 		}
+		case T_CreateForeignServerStmt:
+		{
+			return &Any_CreateForeignServer;
+		}
 		case T_CreatePolicyStmt:
 		{
 			return &Any_CreatePolicy;
@@ -977,6 +1032,11 @@ GetDistributeObjectOps(Node *node)
 			return &Function_Drop;
 		}
+		case OBJECT_FOREIGN_SERVER:
+		{
+			return &ForeignServer_Drop;
+		}
 		case OBJECT_INDEX:
 		{
 			return &Index_Drop;
@@ -1081,6 +1141,11 @@ GetDistributeObjectOps(Node *node)
 			return &Collation_Rename;
 		}
+		case OBJECT_FOREIGN_SERVER:
+		{
+			return &ForeignServer_Rename;
+		}
 		case OBJECT_FUNCTION:
 		{
 			return &Function_Rename;


@@ -797,7 +797,7 @@ CreateExtensionDDLCommand(const ObjectAddress *extensionAddress)
 /*
- * RecreateEnumStmt returns a parsetree for a CREATE EXTENSION statement that would
+ * RecreateExtensionStmt returns a parsetree for a CREATE EXTENSION statement that would
  * recreate the given extension on a new node.
  */
 static Node *


@@ -0,0 +1,364 @@
/*-------------------------------------------------------------------------
*
* foreign_server.c
* Commands for FOREIGN SERVER statements.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "catalog/pg_foreign_server.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/commands.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"
#include "distributed/worker_transaction.h"
#include "foreign/foreign.h"
#include "nodes/makefuncs.h"
#include "nodes/parsenodes.h"
#include "nodes/primnodes.h"
static Node * RecreateForeignServerStmt(Oid serverId);
static bool NameListHasDistributedServer(List *serverNames);
static ObjectAddress GetObjectAddressByServerName(char *serverName, bool missing_ok);
/*
* PreprocessCreateForeignServerStmt is called during the planning phase for
* CREATE SERVER.
*/
List *
PreprocessCreateForeignServerStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
if (!ShouldPropagate())
{
return NIL;
}
EnsureCoordinator();
char *sql = DeparseTreeNode(node);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* PreprocessAlterForeignServerStmt is called during the planning phase for
* ALTER SERVER .. OPTIONS ..
*/
List *
PreprocessAlterForeignServerStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
AlterForeignServerStmt *stmt = castNode(AlterForeignServerStmt, node);
ObjectAddress address = GetObjectAddressByServerName(stmt->servername, false);
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
char *sql = DeparseTreeNode(node);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* PreprocessRenameForeignServerStmt is called during the planning phase for
* ALTER SERVER RENAME.
*/
List *
PreprocessRenameForeignServerStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_FOREIGN_SERVER);
ObjectAddress address = GetObjectAddressByServerName(strVal(stmt->object), false);
/* filter distributed servers */
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
char *sql = DeparseTreeNode(node);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* PreprocessAlterForeignServerOwnerStmt is called during the planning phase for
* ALTER SERVER .. OWNER TO.
*/
List *
PreprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_FOREIGN_SERVER);
ObjectAddress address = GetObjectAddressByServerName(strVal(stmt->object), false);
/* filter distributed servers */
if (!ShouldPropagateObject(&address))
{
return NIL;
}
EnsureCoordinator();
char *sql = DeparseTreeNode(node);
/* to prevent recursion with mx we disable ddl propagation */
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* PreprocessDropForeignServerStmt is called during the planning phase for
* DROP SERVER.
*/
List *
PreprocessDropForeignServerStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
DropStmt *stmt = castNode(DropStmt, node);
Assert(stmt->removeType == OBJECT_FOREIGN_SERVER);
bool includesDistributedServer = NameListHasDistributedServer(stmt->objects);
if (!includesDistributedServer)
{
return NIL;
}
if (list_length(stmt->objects) > 1)
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot drop distributed server with other servers"),
errhint("Try dropping each object in a separate DROP command")));
}
if (!ShouldPropagate())
{
return NIL;
}
EnsureCoordinator();
Assert(list_length(stmt->objects) == 1);
Value *serverValue = linitial(stmt->objects);
ObjectAddress address = GetObjectAddressByServerName(strVal(serverValue), false);
/* unmark distributed server */
UnmarkObjectDistributed(&address);
const char *deparsedStmt = DeparseTreeNode((Node *) stmt);
/*
* To prevent recursive propagation in mx architecture, we disable ddl
* propagation before sending the command to workers.
*/
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) deparsedStmt,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* PostprocessCreateForeignServerStmt is called after a CREATE SERVER command has
* been executed by standard process utility.
*/
List *
PostprocessCreateForeignServerStmt(Node *node, const char *queryString)
{
bool missingOk = false;
ObjectAddress address = GetObjectAddressFromParseTree(node, missingOk);
EnsureDependenciesExistOnAllNodes(&address);
return NIL;
}
/*
* PostprocessAlterForeignServerOwnerStmt is called after a ALTER SERVER OWNER command
* has been executed by standard process utility.
*/
List *
PostprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString)
{
bool missingOk = false;
ObjectAddress address = GetObjectAddressFromParseTree(node, missingOk);
EnsureDependenciesExistOnAllNodes(&address);
return NIL;
}
/*
* CreateForeignServerStmtObjectAddress finds the ObjectAddress for the server
* that is created by given CreateForeignServerStmt. If missingOk is false and if
* the server does not exist, then it errors out.
*
* Never returns NULL, but the objid in the address can be invalid if missingOk
* was set to true.
*/
ObjectAddress
CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok)
{
CreateForeignServerStmt *stmt = castNode(CreateForeignServerStmt, node);
return GetObjectAddressByServerName(stmt->servername, missing_ok);
}
/*
* AlterForeignServerOwnerStmtObjectAddress finds the ObjectAddress for the server
* given in AlterOwnerStmt. If missingOk is false and if
* the server does not exist, then it errors out.
*
* Never returns NULL, but the objid in the address can be invalid if missingOk
* was set to true.
*/
ObjectAddress
AlterForeignServerOwnerStmtObjectAddress(Node *node, bool missing_ok)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
char *serverName = strVal(stmt->object);
return GetObjectAddressByServerName(serverName, missing_ok);
}
/*
* GetForeignServerCreateDDLCommand returns a list that includes the CREATE SERVER
* command that would recreate the given server on a new node.
*/
List *
GetForeignServerCreateDDLCommand(Oid serverId)
{
/* generate a statement for creation of the server in "if not exists" construct */
Node *stmt = RecreateForeignServerStmt(serverId);
/* capture ddl command for the create statement */
const char *ddlCommand = DeparseTreeNode(stmt);
List *ddlCommands = list_make1((void *) ddlCommand);
return ddlCommands;
}
/*
* RecreateForeignServerStmt returns a parsetree for a CREATE SERVER statement
* that would recreate the given server on a new node.
*/
static Node *
RecreateForeignServerStmt(Oid serverId)
{
ForeignServer *server = GetForeignServer(serverId);
CreateForeignServerStmt *createStmt = makeNode(CreateForeignServerStmt);
/* set server name and if_not_exists fields */
createStmt->servername = pstrdup(server->servername);
createStmt->if_not_exists = true;
/* set foreign data wrapper */
ForeignDataWrapper *fdw = GetForeignDataWrapper(server->fdwid);
createStmt->fdwname = pstrdup(fdw->fdwname);
/* set all fields using the existing server */
if (server->servertype != NULL)
{
createStmt->servertype = pstrdup(server->servertype);
}
if (server->serverversion != NULL)
{
createStmt->version = pstrdup(server->serverversion);
}
createStmt->options = NIL;
int location = -1;
DefElem *option = NULL;
foreach_ptr(option, server->options)
{
DefElem *copyOption = makeDefElem(option->defname, option->arg, location);
createStmt->options = lappend(createStmt->options, copyOption);
}
return (Node *) createStmt;
}
/*
* NameListHasDistributedServer takes a namelist of servers and returns true if at least
* one of them is distributed. Returns false otherwise.
*/
static bool
NameListHasDistributedServer(List *serverNames)
{
Value *serverValue = NULL;
foreach_ptr(serverValue, serverNames)
{
ObjectAddress address = GetObjectAddressByServerName(strVal(serverValue), false);
if (IsObjectDistributed(&address))
{
return true;
}
}
return false;
}
static ObjectAddress
GetObjectAddressByServerName(char *serverName, bool missing_ok)
{
ForeignServer *server = GetForeignServerByName(serverName, missing_ok);
Oid serverOid = server->serverid;
ObjectAddress address = { 0 };
ObjectAddressSet(address, ForeignServerRelationId, serverOid);
return address;
}
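For orientation, a hedged sketch of the statement shapes these handlers propagate; every object name below (example_server, example_role, the postgres_fdw options) is illustrative only and not taken from this commit:

-- handled by PreprocessCreateForeignServerStmt / PostprocessCreateForeignServerStmt
CREATE SERVER example_server TYPE 'generic' VERSION '1.0'
    FOREIGN DATA WRAPPER postgres_fdw
    OPTIONS (host 'localhost', port '5432', dbname 'postgres');
-- handled by PreprocessAlterForeignServerStmt
ALTER SERVER example_server VERSION '1.1' OPTIONS (SET port '5433', DROP dbname);
-- handled by PreprocessRenameForeignServerStmt
ALTER SERVER example_server RENAME TO example_server_renamed;
-- handled by PreprocessAlterForeignServerOwnerStmt / PostprocessAlterForeignServerOwnerStmt
ALTER SERVER example_server_renamed OWNER TO example_role;
-- handled by PreprocessDropForeignServerStmt (one distributed server per DROP, per the check above)
DROP SERVER IF EXISTS example_server_renamed CASCADE;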


@@ -77,7 +77,6 @@
 static void deparse_index_columns(StringInfo buffer, List *indexParameterList,
                                   List *deparseContext);
-static void AppendOptionListToString(StringInfo stringData, List *options);
 static void AppendStorageParametersToString(StringInfo stringBuffer,
                                             List *optionList);
 static void simple_quote_literal(StringInfo buf, const char *val);
@@ -1056,7 +1055,7 @@ generate_qualified_relation_name(Oid relid)
  * AppendOptionListToString converts the option list to its textual format, and
  * appends this text to the given string buffer.
  */
-static void
+void
 AppendOptionListToString(StringInfo stringBuffer, List *optionList)
 {
 	if (optionList != NIL)


@@ -0,0 +1,277 @@
/*-------------------------------------------------------------------------
*
* deparse_foreign_server_stmts.c
* All routines to deparse foreign server statements.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "commands/defrem.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/relay_utility.h"
#include "lib/stringinfo.h"
#include "nodes/nodes.h"
#include "utils/builtins.h"
static void AppendCreateForeignServerStmt(StringInfo buf, CreateForeignServerStmt *stmt);
static void AppendAlterForeignServerStmt(StringInfo buf, AlterForeignServerStmt *stmt);
static void AppendAlterForeignServerOptions(StringInfo buf, AlterForeignServerStmt *stmt);
static void AppendAlterForeignServerRenameStmt(StringInfo buf, RenameStmt *stmt);
static void AppendAlterForeignServerOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt);
static void AppendDropForeignServerStmt(StringInfo buf, DropStmt *stmt);
static void AppendServerNames(StringInfo buf, DropStmt *stmt);
static void AppendBehavior(StringInfo buf, DropStmt *stmt);
static char * GetDefElemActionString(DefElemAction action);
char *
DeparseCreateForeignServerStmt(Node *node)
{
CreateForeignServerStmt *stmt = castNode(CreateForeignServerStmt, node);
StringInfoData str;
initStringInfo(&str);
AppendCreateForeignServerStmt(&str, stmt);
return str.data;
}
char *
DeparseAlterForeignServerStmt(Node *node)
{
AlterForeignServerStmt *stmt = castNode(AlterForeignServerStmt, node);
StringInfoData str;
initStringInfo(&str);
AppendAlterForeignServerStmt(&str, stmt);
return str.data;
}
char *
DeparseAlterForeignServerRenameStmt(Node *node)
{
RenameStmt *stmt = castNode(RenameStmt, node);
Assert(stmt->renameType == OBJECT_FOREIGN_SERVER);
StringInfoData str;
initStringInfo(&str);
AppendAlterForeignServerRenameStmt(&str, stmt);
return str.data;
}
char *
DeparseAlterForeignServerOwnerStmt(Node *node)
{
AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
Assert(stmt->objectType == OBJECT_FOREIGN_SERVER);
StringInfoData str;
initStringInfo(&str);
AppendAlterForeignServerOwnerStmt(&str, stmt);
return str.data;
}
char *
DeparseDropForeignServerStmt(Node *node)
{
DropStmt *stmt = castNode(DropStmt, node);
Assert(stmt->removeType == OBJECT_FOREIGN_SERVER);
StringInfoData str;
initStringInfo(&str);
AppendDropForeignServerStmt(&str, stmt);
return str.data;
}
static void
AppendCreateForeignServerStmt(StringInfo buf, CreateForeignServerStmt *stmt)
{
appendStringInfoString(buf, "CREATE SERVER ");
if (stmt->if_not_exists)
{
appendStringInfoString(buf, "IF NOT EXISTS ");
}
appendStringInfo(buf, "%s ", quote_identifier(stmt->servername));
if (stmt->servertype)
{
appendStringInfo(buf, "TYPE %s ", quote_literal_cstr(stmt->servertype));
}
if (stmt->version)
{
appendStringInfo(buf, "VERSION %s ", quote_literal_cstr(stmt->version));
}
appendStringInfo(buf, "FOREIGN DATA WRAPPER %s ", quote_identifier(stmt->fdwname));
AppendOptionListToString(buf, stmt->options);
}
static void
AppendAlterForeignServerStmt(StringInfo buf, AlterForeignServerStmt *stmt)
{
appendStringInfo(buf, "ALTER SERVER %s ", quote_identifier(stmt->servername));
if (stmt->has_version)
{
appendStringInfo(buf, "VERSION %s ", quote_literal_cstr(stmt->version));
}
AppendAlterForeignServerOptions(buf, stmt);
}
static void
AppendAlterForeignServerOptions(StringInfo buf, AlterForeignServerStmt *stmt)
{
if (list_length(stmt->options) <= 0)
{
return;
}
appendStringInfoString(buf, "OPTIONS (");
DefElemAction action = DEFELEM_UNSPEC;
DefElem *def = NULL;
foreach_ptr(def, stmt->options)
{
if (def->defaction != DEFELEM_UNSPEC)
{
action = def->defaction;
char *actionString = GetDefElemActionString(action);
appendStringInfo(buf, "%s ", actionString);
}
appendStringInfo(buf, "%s", quote_identifier(def->defname));
if (action != DEFELEM_DROP)
{
const char *value = quote_literal_cstr(defGetString(def));
appendStringInfo(buf, " %s", value);
}
if (def != llast(stmt->options))
{
appendStringInfoString(buf, ", ");
}
}
appendStringInfoString(buf, ")");
}
static void
AppendAlterForeignServerRenameStmt(StringInfo buf, RenameStmt *stmt)
{
appendStringInfo(buf, "ALTER SERVER %s RENAME TO %s",
quote_identifier(strVal(stmt->object)),
quote_identifier(stmt->newname));
}
static void
AppendAlterForeignServerOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt)
{
const char *servername = quote_identifier(strVal(stmt->object));
appendStringInfo(buf, "ALTER SERVER %s OWNER TO ", servername);
appendStringInfo(buf, "%s", RoleSpecString(stmt->newowner, true));
}
static void
AppendDropForeignServerStmt(StringInfo buf, DropStmt *stmt)
{
appendStringInfoString(buf, "DROP SERVER ");
if (stmt->missing_ok)
{
appendStringInfoString(buf, "IF EXISTS ");
}
AppendServerNames(buf, stmt);
AppendBehavior(buf, stmt);
}
static void
AppendServerNames(StringInfo buf, DropStmt *stmt)
{
Value *serverValue = NULL;
foreach_ptr(serverValue, stmt->objects)
{
const char *serverString = quote_identifier(strVal(serverValue));
appendStringInfo(buf, "%s", serverString);
if (serverValue != llast(stmt->objects))
{
appendStringInfoString(buf, ", ");
}
}
}
static void
AppendBehavior(StringInfo buf, DropStmt *stmt)
{
if (stmt->behavior == DROP_CASCADE)
{
appendStringInfoString(buf, " CASCADE");
}
else if (stmt->behavior == DROP_RESTRICT)
{
appendStringInfoString(buf, " RESTRICT");
}
}
static char *
GetDefElemActionString(DefElemAction action)
{
switch (action)
{
case DEFELEM_ADD:
{
return "ADD";
}
case DEFELEM_SET:
{
return "SET";
}
case DEFELEM_DROP:
{
return "DROP";
}
default:
return "";
}
}


@@ -117,7 +117,7 @@ AcquireExecutorShardLocksForRelationRowLockList(List *relationRowLockList)
  * We have selected these lock types according to conflict table given in the
  * Postgres documentation. It is given that FOR UPDATE and FOR NO KEY UPDATE
  * must be conflict with each other modify command. By getting ExlcusiveLock
- * we guarantee that. Note that, getting ExlusiveLock does not mimic the
+ * we guarantee that. Note that, getting ExclusiveLock does not mimic the
  * behaviour of Postgres exactly. Getting row lock with FOR NO KEY UPDATE and
  * FOR KEY SHARE do not conflict in Postgres, yet they block each other in
  * our implementation. Since FOR SHARE and FOR KEY SHARE does not conflict


@@ -613,6 +613,11 @@ SupportedDependencyByCitus(const ObjectAddress *address)
 			return true;
 		}
+		case OCLASS_FOREIGN_SERVER:
+		{
+			return true;
+		}
 		case OCLASS_ROLE:
 		{
 			/*


@@ -4373,6 +4373,13 @@ GetIntervalTypeInfo(char partitionMethod, Var *partitionColumn,
 		case DISTRIBUTE_BY_APPEND:
 		case DISTRIBUTE_BY_RANGE:
 		{
+			/* we need a valid partition column Var in this case */
+			if (partitionColumn == NULL)
+			{
+				ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
+								errmsg("unexpected partition column value: null"),
+								errdetail("Please report this to the Citus core team.")));
+			}
 			*intervalTypeId = partitionColumn->vartype;
 			*intervalTypeMod = partitionColumn->vartypmod;
 			break;


@@ -456,6 +456,14 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
 			break;
 		}
+		case OBJECT_FOREIGN_SERVER:
+		{
+			idToCheck = addr->objectId;
+			aclMaskResult = pg_foreign_server_aclmask(idToCheck, userId, ACL_USAGE,
+													  ACLMASK_ANY);
+			break;
+		}
 		case OBJECT_SEQUENCE:
 		{
 			idToCheck = addr->objectId;


@@ -36,7 +36,7 @@
 #include "optimizer/pathnode.h"
-static uint32 attributeEquivalenceId = 1;
+static uint32 AttributeEquivalenceId = 1;
 /*
@@ -292,7 +292,7 @@ SafeToPushdownUnionSubquery(Query *originalQuery,
 		palloc0(sizeof(AttributeEquivalenceClass));
 	ListCell *relationRestrictionCell = NULL;
-	attributeEquivalence->equivalenceId = attributeEquivalenceId++;
+	attributeEquivalence->equivalenceId = AttributeEquivalenceId++;
 	/*
 	 * Ensure that the partition column is in the same place across all
@@ -617,7 +617,7 @@ GenerateAllAttributeEquivalences(PlannerRestrictionContext *plannerRestrictionCo
 		plannerRestrictionContext->joinRestrictionContext;
 	/* reset the equivalence id counter per call to prevent overflows */
-	attributeEquivalenceId = 1;
+	AttributeEquivalenceId = 1;
 	List *relationRestrictionAttributeEquivalenceList =
 		GenerateAttributeEquivalencesForRelationRestrictions(relationRestrictionContext);
@@ -801,7 +801,7 @@ AttributeEquivalenceClassForEquivalenceClass(EquivalenceClass *plannerEqClass,
 	ListCell *equivilanceMemberCell = NULL;
 	PlannerInfo *plannerInfo = relationRestriction->plannerInfo;
-	attributeEquivalence->equivalenceId = attributeEquivalenceId++;
+	attributeEquivalence->equivalenceId = AttributeEquivalenceId++;
 	foreach(equivilanceMemberCell, plannerEqClass->ec_members)
 	{
@@ -1183,7 +1183,7 @@ GenerateAttributeEquivalencesForJoinRestrictions(JoinRestrictionContext *
 	AttributeEquivalenceClass *attributeEquivalence = palloc0(
 		sizeof(AttributeEquivalenceClass));
-	attributeEquivalence->equivalenceId = attributeEquivalenceId++;
+	attributeEquivalence->equivalenceId = AttributeEquivalenceId++;
 	AddToAttributeEquivalenceClass(attributeEquivalence,
 								   joinRestriction->plannerInfo, leftVar);


@@ -7,6 +7,7 @@
 #include "udfs/citus_check_cluster_node_health/11.0-1.sql"
 #include "udfs/citus_internal_add_object_metadata/11.0-1.sql"
+#include "udfs/citus_run_local_command/11.0-1.sql"
 DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text);
 DROP FUNCTION pg_catalog.master_get_table_metadata(text);
@@ -42,4 +43,17 @@ CREATE FUNCTION worker_drop_distributed_table_metadata_only(table_oid oid)
     LANGUAGE C STRICT
     AS 'MODULE_PATHNAME', $$worker_drop_distributed_table_metadata_only$$;
 COMMENT ON FUNCTION worker_drop_distributed_table_metadata_only(table_oid oid)
     IS 'drops the metadata of the given table oid';
+-- Here we keep track of partitioned tables that exists before Citus 11
+-- where we need to call fix_all_partition_shard_index_names() before
+-- metadata is synced. Note that after citus-11, we automatically
+-- adjust the indexes so we only need to fix existing indexes
+DO LANGUAGE plpgsql
+$$
+DECLARE
+    partitioned_table_exists bool :=false;
+BEGIN
+    SELECT count(*) > 0 INTO partitioned_table_exists FROM pg_dist_partition p JOIN pg_class c ON p.logicalrelid = c.oid WHERE c.relkind = 'p';
+    UPDATE pg_dist_node_metadata SET metadata=jsonb_set(metadata, '{partitioned_citus_table_exists_pre_11}', to_jsonb(partitioned_table_exists), true);
+END;
+$$;
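A hedged one-liner to inspect the flag the DO block above writes, assuming pg_dist_node_metadata's single jsonb column named metadata:

-- returns true on clusters that had partitioned Citus tables before the upgrade
SELECT metadata -> 'partitioned_citus_table_exists_pre_11' FROM pg_dist_node_metadata;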


@@ -44,3 +44,4 @@ DROP FUNCTION pg_catalog.citus_check_connection_to_node (text, integer);
 DROP FUNCTION pg_catalog.citus_check_cluster_node_health ();
 DROP FUNCTION pg_catalog.citus_internal_add_object_metadata(text, text[], text[], integer, integer);
+DROP FUNCTION pg_catalog.citus_run_local_command(text);


@@ -0,0 +1,8 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_run_local_command(command text)
RETURNS void AS $$
BEGIN
EXECUTE $1;
END;
$$ LANGUAGE PLPGSQL;
COMMENT ON FUNCTION pg_catalog.citus_run_local_command(text)
IS 'citus_run_local_command executes the input command';


@@ -0,0 +1,8 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_run_local_command(command text)
RETURNS void AS $$
BEGIN
EXECUTE $1;
END;
$$ LANGUAGE PLPGSQL;
COMMENT ON FUNCTION pg_catalog.citus_run_local_command(text)
IS 'citus_run_local_command executes the input command';
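Because PL/pgSQL's EXECUTE hands the whole input string to SPI, a semicolon-joined batch runs in a single round trip, which is what CreateFixPartitionShardIndexNamesTaskList relies on further down in this diff. A hedged usage sketch with illustrative index names:

SELECT pg_catalog.citus_run_local_command(
    $$ALTER INDEX p_2020_idx RENAME TO p_2020_idx_renamed;ALTER INDEX p_2021_idx RENAME TO p_2021_idx_renamed$$);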


@@ -183,7 +183,7 @@ RecoverWorkerTransactions(WorkerNode *workerNode)
  * distributed transactions.
  *
  * We could avoid this by temporarily blocking new prepared transactions
- * from being created by taking an ExlusiveLock on pg_dist_transaction.
+ * from being created by taking an ExclusiveLock on pg_dist_transaction.
  * However, this hurts write performance, so instead we avoid blocking
  * by consulting the list of active distributed transactions, and follow
  * a carefully chosen order to avoid race conditions:


@@ -569,7 +569,23 @@ CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId, Oid partitionRel
 	task->taskId = taskId++;
 	task->taskType = DDL_TASK;
-	SetTaskQueryStringList(task, queryStringList);
+
+	/*
+	 * There could be O(#partitions * #indexes) queries in
+	 * the queryStringList.
+	 *
+	 * In order to avoid round-trips per query in queryStringList,
+	 * we join the string and send as a single command via the UDF.
+	 * Otherwise, the executor sends each command with one
+	 * round-trip.
+	 */
+	char *string = StringJoin(queryStringList, ';');
+	StringInfo commandToRun = makeStringInfo();
+	appendStringInfo(commandToRun,
+					 "SELECT pg_catalog.citus_run_local_command($$%s$$)", string);
+	SetTaskQueryString(task, commandToRun->data);
 	task->dependentTaskList = NULL;
 	task->replicationModel = REPLICATION_MODEL_INVALID;
 	task->anchorShardId = parentShardId;


@@ -65,6 +65,7 @@ extern char * generate_relation_name(Oid relid, List *namespaces);
 extern char * generate_qualified_relation_name(Oid relid);
 extern char * generate_operator_name(Oid operid, Oid arg1, Oid arg2);
 extern List * getOwnedSequences_internal(Oid relid, AttrNumber attnum, char deptype);
+extern void AppendOptionListToString(StringInfo stringData, List *options);
 #endif /* CITUS_RULEUTILS_H */


@@ -225,6 +225,27 @@ extern Oid GetReferencedTableId(Oid foreignKeyId);
 extern Oid GetReferencingTableId(Oid foreignKeyId);
 extern bool RelationInvolvedInAnyNonInheritedForeignKeys(Oid relationId);
+
+/* foreign_server.c - forward declarations */
+extern List * PreprocessCreateForeignServerStmt(Node *node, const char *queryString,
+                                                ProcessUtilityContext
+                                                processUtilityContext);
+extern List * PreprocessAlterForeignServerStmt(Node *node, const char *queryString,
+                                               ProcessUtilityContext processUtilityContext);
+extern List * PreprocessRenameForeignServerStmt(Node *node, const char *queryString,
+                                                ProcessUtilityContext
+                                                processUtilityContext);
+extern List * PreprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString,
+                                                    ProcessUtilityContext
+                                                    processUtilityContext);
+extern List * PreprocessDropForeignServerStmt(Node *node, const char *queryString,
+                                              ProcessUtilityContext
+                                              processUtilityContext);
+extern List * PostprocessCreateForeignServerStmt(Node *node, const char *queryString);
+extern List * PostprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString);
+extern ObjectAddress CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok);
+extern ObjectAddress AlterForeignServerOwnerStmtObjectAddress(Node *node, bool
+                                                              missing_ok);
+extern List * GetForeignServerCreateDDLCommand(Oid serverId);
+
 /* function.c - forward declarations */
 extern List * PreprocessCreateFunctionStmt(Node *stmt, const char *queryString,


@@ -46,6 +46,13 @@ extern void QualifyRenameCollationStmt(Node *stmt);
 extern void QualifyAlterCollationSchemaStmt(Node *stmt);
 extern void QualifyAlterCollationOwnerStmt(Node *stmt);
+
+/* forward declarations for deparse_foreign_server_stmts.c */
+extern char * DeparseCreateForeignServerStmt(Node *node);
+extern char * DeparseAlterForeignServerStmt(Node *node);
+extern char * DeparseAlterForeignServerRenameStmt(Node *node);
+extern char * DeparseAlterForeignServerOwnerStmt(Node *node);
+extern char * DeparseDropForeignServerStmt(Node *node);
+
 /* forward declarations for deparse_table_stmts.c */
 extern char * DeparseAlterTableSchemaStmt(Node *stmt);
 extern char * DeparseAlterTableStmt(Node *node);
View File
@@ -1,8 +1,8 @@
 # ----------
 # Only run few basic tests to set up a testing environment
 # ----------
-test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw columnar_test_helpers failure_test_helpers
 test: multi_cluster_management
+test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw columnar_test_helpers
 test: multi_test_catalog_views
 test: multi_create_table multi_behavioral_analytics_create_table
 test: multi_create_table_superuser multi_behavioral_analytics_create_table_superuser
View File
@@ -457,7 +457,7 @@ ALTER TABLE par_another_citus_local_table ADD CONSTRAINT fkey_self FOREIGN KEY(v
 ALTER TABLE par_citus_local_table ADD CONSTRAINT fkey_c_to_c FOREIGN KEY(val) REFERENCES par_another_citus_local_table(val) ON UPDATE CASCADE;
 SELECT citus_add_local_table_to_metadata('par_another_citus_local_table', cascade_via_foreign_keys=>true);
 NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1507011, 'citus_local_table_triggers', 1507012, 'citus_local_table_triggers', 'ALTER TABLE citus_local_table_triggers.par_another_citus_local_table ATTACH PARTITION citus_local_table_triggers.par_another_citus_local_table_1 FOR VALUES FROM (1) TO (10000);')
-NOTICE: executing the command locally: SELECT worker_fix_partition_shard_index_names('citus_local_table_triggers.par_another_citus_local_table_val_key_1507011'::regclass, 'citus_local_table_triggers.par_another_citus_local_table_1_1507012', 'par_another_citus_local_table_1_val_key_1507012')
+NOTICE: executing the command locally: SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('citus_local_table_triggers.par_another_citus_local_table_val_key_1507011'::regclass, 'citus_local_table_triggers.par_another_citus_local_table_1_1507012', 'par_another_citus_local_table_1_val_key_1507012')$$)
 NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1507013, 'citus_local_table_triggers', 1507014, 'citus_local_table_triggers', 'ALTER TABLE citus_local_table_triggers.par_citus_local_table ATTACH PARTITION citus_local_table_triggers.par_citus_local_table_1 FOR VALUES FROM (1) TO (10000);')
 NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1507011, 'citus_local_table_triggers', 1507011, 'citus_local_table_triggers', 'ALTER TABLE citus_local_table_triggers.par_another_citus_local_table ADD CONSTRAINT fkey_self FOREIGN KEY (val) REFERENCES citus_local_table_triggers.par_another_citus_local_table(val)')
 NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1507013, 'citus_local_table_triggers', 1507011, 'citus_local_table_triggers', 'ALTER TABLE citus_local_table_triggers.par_citus_local_table ADD CONSTRAINT fkey_c_to_c FOREIGN KEY (val) REFERENCES citus_local_table_triggers.par_another_citus_local_table(val) ON UPDATE CASCADE')
View File
@@ -0,0 +1,13 @@
+SELECT citus_run_local_command($$SELECT 1; SELECT 1$$);
+ citus_run_local_command
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_run_local_command($$SELECT 1; SELECT 1/0$$);
+ERROR: division by zero
+CONTEXT: SQL statement "SELECT 1; SELECT 1/0"
+PL/pgSQL function citus_run_local_command(text) line XX at EXECUTE
+SELECT citus_run_local_command(NULL);
+ERROR: query string argument of EXECUTE is null
+CONTEXT: PL/pgSQL function citus_run_local_command(text) line XX at EXECUTE
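The error CONTEXT above implies the UDF is a thin PL/pgSQL wrapper around EXECUTE. A minimal sketch consistent with this output (the actual definition lives in the extension's SQL scripts; this is illustrative only):

CREATE OR REPLACE FUNCTION pg_catalog.citus_run_local_command(command text)
    RETURNS void
    LANGUAGE plpgsql
AS $function$
BEGIN
    -- EXECUTE runs the whole string, so several ';'-joined statements
    -- arrive in a single round-trip; errors propagate to the caller.
    EXECUTE command;
END;
$function$;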
View File
@@ -627,8 +627,8 @@ CREATE INDEX ix_test_index_creation5 ON test_index_creation1
 INCLUDE (field1) WHERE (tenant_id = 100);
 NOTICE: executing the command locally: CREATE INDEX ix_test_index_creation5_1503042 ON coordinator_shouldhaveshards.test_index_creation1_1503042 USING btree (tenant_id ,timeperiod ) INCLUDE (field1 ) WHERE (tenant_id = 100)
 NOTICE: executing the command locally: CREATE INDEX ix_test_index_creation5_1503045 ON coordinator_shouldhaveshards.test_index_creation1_1503045 USING btree (tenant_id ,timeperiod ) INCLUDE (field1 ) WHERE (tenant_id = 100)
-NOTICE: executing the command locally: SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503048', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503048');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503049', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503049');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503050', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503050');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503051', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503051');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503052', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503052');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503053', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503053');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503054', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503054');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503055', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503055');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503056', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503056');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503057', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503057');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503058', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503058');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503059', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503059')
-NOTICE: executing the command locally: SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503048', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503048');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503049', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503049');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503050', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503050');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503051', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503051');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503052', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503052');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503053', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503053');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503054', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503054');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503055', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503055');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503056', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503056');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503057', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503057');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503058', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503058');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503059', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503059')
+NOTICE: executing the command locally: SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503048', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503048');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503049', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503049');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503050', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503050');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503051', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503051');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503052', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503052');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503053', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503053');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503054', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503054');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503055', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503055');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503056', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503056');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503057', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503057');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503058', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503058');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503042'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503059', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503059')$$)
+NOTICE: executing the command locally: SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503048', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503048');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503049', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503049');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503050', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503050');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503051', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503051');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503052', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503052');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_26_1503053', 'test_index_creation1_p2020_09_2_tenant_id_time_6020e8f8_1503053');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503054', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503054');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503055', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503055');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503056', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503056');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503057', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503057');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503058', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503058');SELECT worker_fix_partition_shard_index_names('coordinator_shouldhaveshards.ix_test_index_creation5_1503045'::regclass, 'coordinator_shouldhaveshards.test_index_creation1_p2020_09_27_1503059', 'test_index_creation1_p2020_09__tenant_id_timep_624f7e94_1503059')$$)
 -- test if indexes are created
 SELECT 1 AS created WHERE EXISTS(SELECT * FROM pg_indexes WHERE indexname LIKE '%test_index_creation%');
  created
View File
@@ -612,7 +612,7 @@ SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', dist
 SELECT * FROM (SELECT unnest(master_metadata_snapshot()) as metadata_command order by 1) as innerResult WHERE metadata_command like '%distributed_object_data%';
  metadata_command
 ---------------------------------------------------------------------
-WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('type', ARRAY['public.usage_access_type']::text[], ARRAY[]::text[], -1, 0), ('type', ARRAY['function_tests.dup_result']::text[], ARRAY[]::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func']::text[], ARRAY['public.usage_access_type', 'integer[]']::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func_third']::text[], ARRAY['integer', 'integer[]']::text[], 0, 50), ('function', ARRAY['function_tests', 'notice']::text[], ARRAY['pg_catalog.text']::text[], -1, 0), ('function', ARRAY['function_tests', 'dup']::text[], ARRAY['pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_with_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_mi''xed_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_sfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_invfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_finalfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'my_rank']::text[], ARRAY['pg_catalog."any"']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_sfunc']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_finalfunc']::text[], ARRAY['function_tests.dup_result']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'agg_names']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['schema_colocation']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests2']::text[], ARRAY[]::text[], -1, 0), ('extension', ARRAY['plpgsql']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
+WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('type', ARRAY['public.usage_access_type']::text[], ARRAY[]::text[], -1, 0), ('type', ARRAY['function_tests.dup_result']::text[], ARRAY[]::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func']::text[], ARRAY['public.usage_access_type', 'integer[]']::text[], -1, 0), ('function', ARRAY['public', 'usage_access_func_third']::text[], ARRAY['integer', 'integer[]']::text[], 0, 50), ('function', ARRAY['function_tests', 'notice']::text[], ARRAY['pg_catalog.text']::text[], -1, 0), ('function', ARRAY['function_tests', 'dup']::text[], ARRAY['pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_with_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], 0, 52), ('function', ARRAY['function_tests', 'eq_mi''xed_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_sfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_invfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_finalfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'my_rank']::text[], ARRAY['pg_catalog."any"']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_sfunc']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('function', ARRAY['function_tests', 'agg_names_finalfunc']::text[], ARRAY['function_tests.dup_result']::text[], -1, 0), ('aggregate', ARRAY['function_tests', 'agg_names']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0), ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0), ('server', ARRAY['fake_fdw_server']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['schema_colocation']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests']::text[], ARRAY[]::text[], -1, 0), ('schema', ARRAY['function_tests2']::text[], ARRAY[]::text[], -1, 0), ('extension', ARRAY['plpgsql']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
 (1 row)
 -- valid distribution with distribution_arg_index
View File
@@ -105,10 +105,10 @@ SELECT name FROM r1 WHERE id = 2;
 -- verify a connection attempt was made to the intercepted node, this would have cause the
 -- connection to have been delayed and thus caused a timeout
-SELECT citus.dump_network_traffic();
- dump_network_traffic
+SELECT * FROM citus.dump_network_traffic() WHERE conn=0;
+ conn | source | message
 ---------------------------------------------------------------------
- (0,coordinator,"[initial message]")
+ 0 | coordinator | [initial message]
 (1 row)
 SELECT citus.mitmproxy('conn.allow()');
View File
@@ -0,0 +1,17 @@
+Parsed test spec with 1 sessions
+
+starting permutation: check_mx
+step check_mx:
+    SHOW citus.enable_metadata_sync_by_default;
+    SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary';
+
+citus.enable_metadata_sync_by_default
+---------------------------------------------------------------------
+on
+(1 row)
+
+bool_and
+---------------------------------------------------------------------
+t
+(1 row)
+
View File
@@ -62,6 +62,15 @@ RETURNS fdw_handler
 AS 'citus'
 LANGUAGE C STRICT;
 CREATE FOREIGN DATA WRAPPER fake_fdw_1 HANDLER fake_fdw_handler;
+SELECT run_command_on_workers($$
+CREATE FOREIGN DATA WRAPPER fake_fdw_1 HANDLER fake_fdw_handler;
+$$);
+ run_command_on_workers
+---------------------------------------------------------------------
+ (localhost,57637,t,"CREATE FOREIGN DATA WRAPPER")
+ (localhost,57638,t,"CREATE FOREIGN DATA WRAPPER")
+(2 rows)
+
 CREATE SERVER fake_fdw_server_1 FOREIGN DATA WRAPPER fake_fdw_1;
 CREATE FOREIGN TABLE foreign_table (
 key int,
View File
@@ -131,4 +131,5 @@ SELECT master_copy_shard_placement(
 transfer_mode := 'block_writes');
 ERROR: Table 'mx_table' is streaming replicated. Shards of streaming replicated tables cannot be copied
 SET client_min_messages TO ERROR;
+DROP TABLE mcsp.history;
 DROP SCHEMA mcsp CASCADE;
View File
@@ -425,20 +425,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE
 ALTER EXTENSION citus UPDATE TO '9.4-2';
 -- should see the old source code
 SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1;
  prosrc
 ---------------------------------------------------------------------
  +
  DECLARE +
  colocated_tables regclass[]; +
  BEGIN +
  SELECT get_colocated_table_array(relation) INTO colocated_tables;+
  PERFORM +
  master_update_shard_statistics(shardid) +
  FROM +
  pg_dist_shard +
  WHERE +
  logicalrelid = ANY (colocated_tables); +
  END; +
 (1 row)
@@ -466,20 +466,20 @@ SELECT * FROM multi_extension.print_extension_changes();
 ALTER EXTENSION citus UPDATE TO '9.4-1';
 -- should see the old source code
 SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1;
  prosrc
 ---------------------------------------------------------------------
  +
  DECLARE +
  colocated_tables regclass[]; +
  BEGIN +
  SELECT get_colocated_table_array(relation) INTO colocated_tables;+
  PERFORM +
  master_update_shard_statistics(shardid) +
  FROM +
  pg_dist_shard +
  WHERE +
  logicalrelid = ANY (colocated_tables); +
  END; +
 (1 row)
@@ -573,20 +573,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE
 ALTER EXTENSION citus UPDATE TO '9.5-2';
 -- should see the old source code
 SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1;
  prosrc
 ---------------------------------------------------------------------
  +
  DECLARE +
  colocated_tables regclass[]; +
  BEGIN +
  SELECT get_colocated_table_array(relation) INTO colocated_tables;+
  PERFORM +
  master_update_shard_statistics(shardid) +
  FROM +
  pg_dist_shard +
  WHERE +
  logicalrelid = ANY (colocated_tables); +
  END; +
 (1 row)
@@ -614,20 +614,20 @@ SELECT * FROM multi_extension.print_extension_changes();
 ALTER EXTENSION citus UPDATE TO '9.5-1';
 -- should see the old source code
 SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1;
  prosrc
 ---------------------------------------------------------------------
  +
  DECLARE +
  colocated_tables regclass[]; +
  BEGIN +
  SELECT get_colocated_table_array(relation) INTO colocated_tables;+
  PERFORM +
  master_update_shard_statistics(shardid) +
  FROM +
  pg_dist_shard +
  WHERE +
  logicalrelid = ANY (colocated_tables); +
  END; +
 (1 row)
@@ -955,8 +955,35 @@ ERROR: cstore_fdw tables are deprecated as of Citus 11.0
 HINT: Install Citus 10.2 and convert your cstore_fdw tables to the columnar access method before upgrading further
 CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE
 DELETE FROM pg_dist_shard WHERE shardid = 1;
+-- partitioned table count is tracked on Citus 11 upgrade
+CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id);
+CREATE TABLE orders_2020_07_01
+PARTITION OF e_transactions FOR VALUES IN (1,2,3);
+INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', NULL, 7, 's');
+SELECT
+(metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,
+(metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null
+FROM
+pg_dist_node_metadata;
+ partitioned_citus_table_exists_pre_11 | is_null
+---------------------------------------------------------------------
+                                       | t
+(1 row)
+
 -- Test downgrade to 10.2-4 from 11.0-1
 ALTER EXTENSION citus UPDATE TO '11.0-1';
+SELECT
+(metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,
+(metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null
+FROM
+pg_dist_node_metadata;
+ partitioned_citus_table_exists_pre_11 | is_null
+---------------------------------------------------------------------
+ t                                     | f
+(1 row)
+
+DELETE FROM pg_dist_partition WHERE logicalrelid = 'e_transactions'::regclass;
+DROP TABLE e_transactions;
 ALTER EXTENSION citus UPDATE TO '10.2-4';
 -- Should be empty result since upgrade+downgrade should be a no-op
 SELECT * FROM multi_extension.print_extension_changes();
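For orientation (a hypothetical sketch, not part of the diff): pg_dist_node_metadata stores a single jsonb blob, so a flag like the one tested above could be set with plain jsonb operators, along these lines:

UPDATE pg_dist_node_metadata
SET metadata = jsonb_set(metadata, '{partitioned_citus_table_exists_pre_11}', to_jsonb(true));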
@@ -967,7 +994,7 @@ SELECT * FROM multi_extension.print_extension_changes();
 -- Snapshot of state at 11.0-1
 ALTER EXTENSION citus UPDATE TO '11.0-1';
 SELECT * FROM multi_extension.print_extension_changes();
  previous_object | current_object
 ---------------------------------------------------------------------
  function citus_disable_node(text,integer) void |
  function master_append_table_to_shard(bigint,text,text,integer) real |
@@ -977,7 +1004,8 @@ SELECT * FROM multi_extension.print_extension_changes();
  | function citus_check_connection_to_node(text,integer) boolean
  | function citus_disable_node(text,integer,boolean) void
  | function citus_internal_add_object_metadata(text,text[],text[],integer,integer) void
-(8 rows)
+ | function citus_run_local_command(text) void
+(9 rows)
 DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
 -- show running version
View File
@@ -564,7 +564,7 @@ NOTICE: issuing CREATE INDEX i4 ON parent_table(dist_col);
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing CREATE INDEX i4_915000 ON fix_idx_names.parent_table_915000 USING btree (dist_col )
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_idx3_915001')
+NOTICE: issuing SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_idx3_915001')$$)
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -594,7 +594,7 @@ NOTICE: issuing ALTER TABLE parent_table ADD CONSTRAINT pkey_cst PRIMARY KEY (d
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_apply_shard_ddl_command (915000, 'fix_idx_names', 'ALTER TABLE parent_table ADD CONSTRAINT pkey_cst PRIMARY KEY (dist_col, partition_col);')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_pkey_915001')
+NOTICE: issuing SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_pkey_915001')$$)
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -623,7 +623,7 @@ NOTICE: issuing ALTER TABLE parent_table ADD CONSTRAINT unique_cst UNIQUE (dist
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_apply_shard_ddl_command (915000, 'fix_idx_names', 'ALTER TABLE parent_table ADD CONSTRAINT unique_cst UNIQUE (dist_col, partition_col);')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.unique_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_partition_col_key_915001')
+NOTICE: issuing SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('fix_idx_names.unique_cst_915000'::regclass, 'fix_idx_names.p1_915001', 'p1_dist_col_partition_col_key_915001')$$)
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
@@ -700,17 +700,7 @@ NOTICE: issuing ALTER TABLE parent_table ATTACH PARTITION p2 FOR VALUES FROM ('
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SELECT worker_apply_inter_shard_ddl_command (915000, 'fix_idx_names', 915002, 'fix_idx_names', 'ALTER TABLE parent_table ATTACH PARTITION p2 FOR VALUES FROM (''2019-01-01'') TO (''2020-01-01'');')
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i1_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx_915002')
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i2_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx1_915002')
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i3_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx2_915002')
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_renamed_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx3_915002')
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_pkey_915002')
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SELECT worker_fix_partition_shard_index_names('fix_idx_names.unique_cst_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_partition_col_key_915002')
+NOTICE: issuing SELECT pg_catalog.citus_run_local_command($$SELECT worker_fix_partition_shard_index_names('fix_idx_names.i1_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.i2_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx1_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.i3_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx2_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.i4_renamed_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_idx3_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.pkey_cst_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_pkey_915002');SELECT worker_fix_partition_shard_index_names('fix_idx_names.unique_cst_915000'::regclass, 'fix_idx_names.p2_915002', 'p2_dist_col_partition_col_key_915002')$$)
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
View File
@@ -3,6 +3,21 @@
 --
 -- Tests for metadata snapshot functions, metadata syncing functions and propagation of
 -- metadata changes to MX tables.
+-- Turn metadata sync off at first
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+NOTICE: dropping metadata on the node (localhost,57637)
+ stop_metadata_sync_to_node
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
+NOTICE: dropping metadata on the node (localhost,57638)
+ stop_metadata_sync_to_node
+---------------------------------------------------------------------
+
+(1 row)
+
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
 SET citus.replicate_reference_tables_on_activate TO off;
 SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
@@ -1467,13 +1482,14 @@ WHERE logicalrelid='mx_ref'::regclass;
 (1 row)
 \c - - - :master_port
+SET client_min_messages TO ERROR;
 SELECT master_add_node('localhost', :worker_2_port);
-NOTICE: Replicating reference table "mx_ref" to the node localhost:xxxxx
  master_add_node
 ---------------------------------------------------------------------
  7
 (1 row)
+RESET client_min_messages;
 SELECT shardid, nodename, nodeport
 FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
 WHERE logicalrelid='mx_ref'::regclass
@@ -1914,3 +1930,16 @@ ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id;
 ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
 ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
+-- Turn metadata sync back on at the end
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+ start_metadata_sync_to_node
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+ start_metadata_sync_to_node
+---------------------------------------------------------------------
+
+(1 row)
+
View File
@@ -219,8 +219,9 @@ NOTICE: identifier "append_zero_shard_table_12345678901234567890123456789012345
 -- Verify that CREATE INDEX on already distributed table has proper shard names.
 CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_lengths(col2);
 \c - - :public_worker_1_host :worker_1_port
+SET citus.override_table_visibility TO FALSE;
 SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
-relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
+relname SIMILAR TO 'tmp_idx_%\_\d{6}' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
  relname | Column | Type | Definition
 ---------------------------------------------------------------------
  tmp_idx_123456789012345678901234567890123456789_5e470afa_225003 | col2 | integer | col2
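A side note on the pattern change above (illustrative, not part of the diff): SIMILAR TO with a trailing \_\d{6} only matches names that end in an underscore plus a 6-digit shard id, so the query now lists shard-level indexes only. For example:

SELECT 'tmp_idx_abc_225003' SIMILAR TO 'tmp_idx_%\_\d{6}';  -- true
SELECT 'tmp_idx_abc' SIMILAR TO 'tmp_idx_%\_\d{6}';         -- false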
@@ -237,8 +238,9 @@ ALTER INDEX tmp_idx_123456789012345678901234567890123456789012345678901234567890
 NOTICE: identifier "tmp_idx_123456789012345678901234567890123456789012345678901234567890" will be truncated to "tmp_idx_1234567890123456789012345678901234567890123456789012345"
 NOTICE: identifier "tmp_idx_newname_123456789012345678901234567890123456789012345678901234567890" will be truncated to "tmp_idx_newname_12345678901234567890123456789012345678901234567"
 \c - - :public_worker_1_host :worker_1_port
+SET citus.override_table_visibility TO FALSE;
 SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
-relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
+relname SIMILAR TO 'tmp_idx_%\_\d{6}' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
  relname | Column | Type | Definition
 ---------------------------------------------------------------------
  tmp_idx_newname_1234567890123456789012345678901_c54e849b_225003 | col2 | integer | col2
@@ -337,6 +339,7 @@ SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
 (1 row)
 \c - - :public_worker_1_host :worker_1_port
+SET citus.override_table_visibility TO FALSE;
 SELECT c1.relname AS unique_index_name
 FROM pg_class c1
 JOIN pg_index i ON i.indexrelid = c1.oid
@@ -369,6 +372,7 @@ SELECT create_distributed_table('too_long_12345678901234567890123456789012345678
 (1 row)
 \c - - :public_worker_1_host :worker_1_port
+SET citus.override_table_visibility TO FALSE;
 \dt *225000000000*
 List of relations
 Schema | Name | Type | Owner
@@ -401,7 +405,8 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0
 (1 row)
 \c - - :public_worker_1_host :worker_1_port
-\dt public.elephant_*
+SET citus.override_table_visibility TO FALSE;
+\dt public.elephant_*[0-9]+
 List of relations
 Schema | Name | Type | Owner
 ---------------------------------------------------------------------
@@ -409,7 +414,7 @@ WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!0
 public | elephant_слонслонслонсло_c8b737c2_2250000000003 | table | postgres
 (2 rows)
-\di public.elephant_*
+\di public.elephant_*[0-9]+
 List of relations
 Schema | Name | Type | Owner | Table
 ---------------------------------------------------------------------
View File
@@ -10,6 +10,7 @@ CREATE SCHEMA sequence_default;
 SET search_path = sequence_default, public;
 -- test both distributed and citus local tables
 SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
+NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
  ?column?
 ---------------------------------------------------------------------
  1
@@ -64,13 +65,9 @@ ERROR: cannot add a column involving DEFAULT nextval('..') because the table is
 HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint
 Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..')
 ALTER TABLE seq_test_0 ADD COLUMN z serial;
-ERROR: Cannot add a column involving serial pseudotypes because the table is not empty
-HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint
-Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..')
+ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers
 ALTER TABLE seq_test_0_local_table ADD COLUMN z serial;
-ERROR: Cannot add a column involving serial pseudotypes because the table is not empty
-HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint
-Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..')
+ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers
 -- follow hint
 ALTER TABLE seq_test_0 ADD COLUMN z int;
 ALTER TABLE seq_test_0 ALTER COLUMN z SET DEFAULT nextval('seq_0');
@@ -127,30 +124,63 @@ SELECT * FROM seq_test_0_local_table ORDER BY 1, 2 LIMIT 5;
 ---------------------------------------------------------------------
  integer | 1 | 1 | 2147483647 | 1 | no | 1
--- cannot change the type of a sequence used in a distributed table
--- even if metadata is not synced to workers
+-- cannot alter a sequence used in a distributed table
+-- since the metadata is synced to workers
 ALTER SEQUENCE seq_0 AS bigint;
-ERROR: Altering a sequence used in a distributed table is currently not supported.
+ERROR: Altering a distributed sequence is currently not supported.
 ALTER SEQUENCE seq_0_local_table AS bigint;
-ERROR: Altering a sequence used in a local table that is added to metadata is currently not supported.
+ERROR: Altering a distributed sequence is currently not supported.
 -- we can change other things like increment
 -- if metadata is not synced to workers
-ALTER SEQUENCE seq_0 INCREMENT BY 2;
-ALTER SEQUENCE seq_0_local_table INCREMENT BY 2;
-\d seq_0
-Sequence "sequence_default.seq_0"
+BEGIN;
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+NOTICE: dropping metadata on the node (localhost,57637)
+ stop_metadata_sync_to_node
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
+NOTICE: dropping metadata on the node (localhost,57638)
+ stop_metadata_sync_to_node
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE SEQUENCE seq_13;
+CREATE SEQUENCE seq_13_local_table;
+CREATE TABLE seq_test_13 (x int, y int);
+CREATE TABLE seq_test_13_local_table (x int, y int);
+SELECT create_distributed_table('seq_test_13','x');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_add_local_table_to_metadata('seq_test_13_local_table');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+ALTER TABLE seq_test_13 ADD COLUMN z int DEFAULT nextval('seq_13');
+ALTER TABLE seq_test_13_local_table ADD COLUMN z int DEFAULT nextval('seq_13_local_table');
+ALTER SEQUENCE seq_13 INCREMENT BY 2;
+ALTER SEQUENCE seq_13_local_table INCREMENT BY 2;
+\d seq_13
+Sequence "sequence_default.seq_13"
 Type | Start | Minimum | Maximum | Increment | Cycles? | Cache
 ---------------------------------------------------------------------
  integer | 1 | 1 | 2147483647 | 2 | no | 1
-\d seq_0_local_table
-Sequence "sequence_default.seq_0_local_table"
+\d seq_13_local_table
+Sequence "sequence_default.seq_13_local_table"
 Type | Start | Minimum | Maximum | Increment | Cycles? | Cache
 ---------------------------------------------------------------------
  integer | 1 | 1 | 2147483647 | 2 | no | 1
 -- check that we can add serial pseudo-type columns
--- when metadata is not yet synced to workers
+-- when metadata is not synced to workers
 TRUNCATE seq_test_0;
 ALTER TABLE seq_test_0 ADD COLUMN w00 smallserial;
 ALTER TABLE seq_test_0 ADD COLUMN w01 serial2;
@@ -165,6 +195,7 @@ ALTER TABLE seq_test_0_local_table ADD COLUMN w10 serial;
 ALTER TABLE seq_test_0_local_table ADD COLUMN w11 serial4;
 ALTER TABLE seq_test_0_local_table ADD COLUMN w20 bigserial;
 ALTER TABLE seq_test_0_local_table ADD COLUMN w21 serial8;
+ROLLBACK;
 -- check alter column type precaution
 ALTER TABLE seq_test_0 ALTER COLUMN z TYPE bigint;
 ERROR: cannot execute ALTER COLUMN TYPE .. command because the column involves a default coming from a sequence
@@ -962,12 +993,6 @@ SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default CASCADE');
 (localhost,57638,t,"DROP SCHEMA")
 (2 rows)
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-stop_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)
 SELECT master_remove_node('localhost', :master_port);
 master_remove_node
 ---------------------------------------------------------------------
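The removed hint spells out the supported two-step alternative to serial pseudotypes on a non-empty distributed table: add a plain integer column first, then attach the sequence as a default. A minimal sketch of that pattern (the table and sequence names are illustrative, not from this test):

CREATE SEQUENCE my_seq;
ALTER TABLE my_dist_table ADD COLUMN z bigint;
ALTER TABLE my_dist_table ALTER COLUMN z SET DEFAULT nextval('my_seq');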
View File
@@ -371,7 +371,7 @@ COMMIT;
 SELECT COUNT(*) FROM pg_dist_transaction;
 count
 ---------------------------------------------------------------------
-1
+2
 (1 row)
 SELECT recover_prepared_transactions();
@@ -393,6 +393,12 @@ SELECT count(DISTINCT nodeport) FROM pg_dist_shard_placement WHERE shardid IN (g
 2
 (1 row)
+SELECT recover_prepared_transactions();
+recover_prepared_transactions
+---------------------------------------------------------------------
+0
+(1 row)
 -- only two of the connections will perform a write (INSERT)
 SET citus.force_max_query_parallelization TO ON;
 BEGIN;
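The extra recover_prepared_transactions() call drains any two-phase-commit records left over from earlier statements, so the later counts start from a known state. The check-then-drain pattern, as a sketch:

SELECT count(*) FROM pg_dist_transaction;
SELECT recover_prepared_transactions();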
View File
@@ -333,6 +333,7 @@ SELECT citus_drop_all_shards('test_local_truncate', 'public', 'test_local_trunca
 4
 (1 row)
+CREATE TABLE temp_pg_dist_partition_row AS SELECT * FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass;
 DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass;
 -- Ensure local data is truncated
 SELECT * FROM test_local_truncate;
@@ -340,7 +341,9 @@ SELECT * FROM test_local_truncate;
 ---------------------------------------------------------------------
 (0 rows)
+INSERT INTO pg_dist_partition SELECT * FROM temp_pg_dist_partition_row;
 DROP TABLE test_local_truncate;
+DROP TABLE temp_pg_dist_partition_row;
 -- Truncate local data, but roll back
 CREATE TABLE test_local_truncate (x int, y int);
 INSERT INTO test_local_truncate VALUES (1,2);
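The added lines stash the pg_dist_partition row before the manual DELETE and put it back afterwards, so the catalog surgery does not leak into later tests. The same stash/restore idiom, generically (backup_row and my_table are hypothetical names):

CREATE TABLE backup_row AS SELECT * FROM pg_dist_partition WHERE logicalrelid = 'my_table'::regclass;
DELETE FROM pg_dist_partition WHERE logicalrelid = 'my_table'::regclass;
-- ... exercise the code path that expects the row to be absent ...
INSERT INTO pg_dist_partition SELECT * FROM backup_row;
DROP TABLE backup_row;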
View File
@@ -0,0 +1,131 @@
CREATE SCHEMA propagate_foreign_server;
SET search_path TO propagate_foreign_server;
-- remove node to add later
SELECT citus_remove_node('localhost', :worker_1_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
-- create schema, extension and foreign server while the worker is removed
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA test_dependent_schema;
CREATE EXTENSION postgres_fdw WITH SCHEMA test_dependent_schema;
SET citus.enable_ddl_propagation TO ON;
CREATE SERVER foreign_server_dependent_schema
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'test');
SELECT 1 FROM citus_add_node('localhost', :worker_1_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- verify the dependent schema and the foreign server are created on the newly added worker
SELECT run_command_on_workers(
$$SELECT COUNT(*) FROM pg_namespace WHERE nspname = 'test_dependent_schema';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
SELECT run_command_on_workers(
$$SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server_dependent_schema';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,t)
(localhost,57638,t,t)
(2 rows)
CREATE SERVER foreign_server_to_drop
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'test');
--should error
DROP SERVER foreign_server_dependent_schema, foreign_server_to_drop;
ERROR: cannot drop distributed server with other servers
HINT: Try dropping each object in a separate DROP command
SET client_min_messages TO ERROR;
DROP SCHEMA test_dependent_schema CASCADE;
RESET client_min_messages;
-- test propagating foreign server creation
CREATE EXTENSION postgres_fdw;
CREATE SERVER foreign_server TYPE 'test_type' VERSION 'v1'
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'testhost', port '5432', dbname 'testdb');
SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server';
?column?
---------------------------------------------------------------------
t
(1 row)
-- verify that the server is created on the worker
SELECT run_command_on_workers(
$$SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,t)
(localhost,57638,t,t)
(2 rows)
ALTER SERVER foreign_server OPTIONS (ADD "fdw_startup_cost" '1000');
ALTER SERVER foreign_server OPTIONS (ADD passfile 'to_be_dropped');
ALTER SERVER foreign_server OPTIONS (SET host 'localhost');
ALTER SERVER foreign_server OPTIONS (DROP port, DROP dbname);
ALTER SERVER foreign_server OPTIONS (ADD port :'master_port', dbname 'regression', DROP passfile);
ALTER SERVER foreign_server RENAME TO "foreign'server_1!";
-- test alter owner
SELECT rolname FROM pg_roles JOIN pg_foreign_server ON (pg_roles.oid=pg_foreign_server.srvowner) WHERE srvname = 'foreign''server_1!';
rolname
---------------------------------------------------------------------
postgres
(1 row)
ALTER SERVER "foreign'server_1!" OWNER TO pg_monitor;
-- verify that the server is renamed on the worker
SELECT run_command_on_workers(
$$SELECT srvoptions FROM pg_foreign_server WHERE srvname = 'foreign''server_1!';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"{host=localhost,fdw_startup_cost=1000,port=57636,dbname=regression}")
(localhost,57638,t,"{host=localhost,fdw_startup_cost=1000,port=57636,dbname=regression}")
(2 rows)
-- verify the owner is changed
SELECT run_command_on_workers(
$$SELECT rolname FROM pg_roles WHERE oid IN (SELECT srvowner FROM pg_foreign_server WHERE srvname = 'foreign''server_1!');$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,pg_monitor)
(localhost,57638,t,pg_monitor)
(2 rows)
-- verify the owner is changed on the coordinator
SELECT rolname FROM pg_roles JOIN pg_foreign_server ON (pg_roles.oid=pg_foreign_server.srvowner) WHERE srvname = 'foreign''server_1!';
rolname
---------------------------------------------------------------------
pg_monitor
(1 row)
DROP SERVER IF EXISTS "foreign'server_1!" CASCADE;
-- verify that the server is dropped on the worker
SELECT run_command_on_workers(
$$SELECT COUNT(*)=0 FROM pg_foreign_server WHERE srvname = 'foreign''server_1!';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,t)
(localhost,57638,t,t)
(2 rows)
\c - - - :worker_1_port
-- not allowed on the worker
ALTER SERVER foreign_server OPTIONS (ADD async_capable 'False');
ERROR: server "foreign_server" does not exist
CREATE SERVER foreign_server_1 TYPE 'test_type' VERSION 'v1'
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'testhost', port '5432', dbname 'testdb');
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
\c - - - :master_port
DROP SCHEMA propagate_foreign_server CASCADE;
NOTICE: drop cascades to extension postgres_fdw
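The ALTER SERVER statements above exercise the ADD, SET, and DROP option actions one at a time; PostgreSQL also accepts them combined in a single OPTIONS list. A sketch against a throwaway server (example_server is a hypothetical name; fetch_size is a standard postgres_fdw option):

CREATE SERVER example_server FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host 'example', dbname 'postgres');
ALTER SERVER example_server OPTIONS (SET host 'localhost', ADD fetch_size '100');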
View File
@@ -89,6 +89,7 @@ ORDER BY 1;
 function citus_relation_size(regclass)
 function citus_remote_connection_stats()
 function citus_remove_node(text,integer)
+function citus_run_local_command(text)
 function citus_server_id()
 function citus_set_coordinator_host(text,integer,noderole,name)
 function citus_set_default_rebalance_strategy(text)
@@ -262,5 +263,5 @@ ORDER BY 1;
 view citus_worker_stat_activity
 view pg_dist_shard_placement
 view time_partitions
-(246 rows)
+(247 rows)
View File
@@ -91,6 +91,7 @@ test: isolation_metadata_sync_deadlock
 test: isolation_replicated_dist_on_mx
 # MXless tests
+test: isolation_check_mx
 test: isolation_turn_mx_off
 test: isolation_replicate_reference_tables_to_coordinator
 test: isolation_reference_copy_vs_all
View File
@@ -26,6 +26,7 @@ test: multi_cluster_management
 # below tests are placed right after multi_cluster_management as we do
 # remove/add node operations and we do not want any preexisting objects
 test: non_super_user_object_metadata
+test: propagate_foreign_servers
 test: alter_role_propagation
 test: propagate_extension_commands
 test: escape_extension_name
@@ -34,11 +35,9 @@ test: alter_database_owner
 test: multi_test_catalog_views
 test: multi_table_ddl
-test: check_mx
 test: multi_sequence_default
 test: turn_mx_off
 test: multi_name_lengths
-test: turn_mx_on
 test: multi_name_resolution
 test: multi_metadata_access
 test: multi_metadata_attributes
@@ -193,8 +192,8 @@ test: check_mx
 test: turn_mx_off
 # test: multi_generate_ddl_commands multi_repair_shards
 test: multi_create_shards
-test: multi_transaction_recovery
 test: turn_mx_on
+test: multi_transaction_recovery
 test: local_dist_join_modifications
 test: local_table_join
@@ -231,10 +230,7 @@ test: multi_drop_extension
 # multi_metadata_sync tests the propagation of mx-related metadata changes to metadata workers
 # multi_unsupported_worker_operations tests that unsupported operations error out on metadata workers
 # ----------
-test: check_mx
-test: turn_mx_off
 test: multi_metadata_sync
-test: turn_mx_on
 test: multi_unsupported_worker_operations
 # ----------
@@ -247,8 +243,6 @@ test: multi_schema_support
 # ----------
 # multi_function_evaluation tests edge-cases in master-side function pre-evaluation
 # ----------
-test: check_mx
-test: turn_mx_off
 test: multi_function_evaluation
 # ----------
@@ -260,6 +254,8 @@ test: multi_truncate
 # multi_colocation_utils tests utility functions written for co-location feature & internal API
 # multi_colocated_shard_transfer tests master_copy_shard_placement with colocated tables.
 # ----------
+test: check_mx
+test: turn_mx_off
 test: multi_colocation_utils
 test: turn_mx_on
 test: multi_colocated_shard_transfer
View File
@@ -1,5 +1,6 @@
-test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw
+test: multi_test_helpers multi_test_helpers_superuser
 test: multi_cluster_management
+test: multi_create_fdw
 test: multi_test_catalog_views
 test: replicated_table_disable_node
View File
@@ -924,14 +924,6 @@ if (!$conninfo)
 '-c', "CREATE FOREIGN DATA WRAPPER $fdw HANDLER $fdws{$fdw};")) == 0
 or die "Could not create foreign data wrapper $fdw on worker";
 }
-foreach my $fdwServer (keys %fdwServers)
-{
-system(catfile($bindir, "psql"),
-('-X', '-h', $host, '-p', $port, '-U', $user, "-d", "regression",
-'-c', "CREATE SERVER $fdwServer FOREIGN DATA WRAPPER $fdwServers{$fdwServer};")) == 0
-or die "Could not create server $fdwServer on worker";
-}
 }
 }
 else
@@ -958,14 +950,6 @@ else
 '-c', "SELECT run_command_on_workers('CREATE FOREIGN DATA WRAPPER $fdw HANDLER $fdws{$fdw};');")) == 0
 or die "Could not create foreign data wrapper $fdw on worker";
 }
-foreach my $fdwServer (keys %fdwServers)
-{
-system(catfile($bindir, "psql"),
-('-X', '-h', $host, '-p', $masterPort, '-U', $user, "-d", $dbname,
-'-c', "SELECT run_command_on_workers('CREATE SERVER $fdwServer FOREIGN DATA WRAPPER $fdwServers{$fdwServer};');")) == 0
-or die "Could not create server $fdwServer on worker";
-}
 }
 # Prepare pg_regress arguments
View File
@@ -0,0 +1,10 @@
session "s1"
step "check_mx"
{
SHOW citus.enable_metadata_sync_by_default;
SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary';
}
permutation "check_mx"
View File
@@ -0,0 +1,4 @@
SELECT citus_run_local_command($$SELECT 1; SELECT 1$$);
SELECT citus_run_local_command($$SELECT 1; SELECT 1/0$$);
SELECT citus_run_local_command(NULL);
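These calls probe the new citus_run_local_command() UDF with a multi-statement string, a failing statement, and NULL. Assuming from the name and from this test that it simply executes its text argument on the local node, a further call might look like this (local_probe is a hypothetical table name):

SELECT citus_run_local_command($$CREATE TABLE IF NOT EXISTS local_probe (a int)$$);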
View File
@@ -66,7 +66,7 @@ SELECT name FROM r1 WHERE id = 2;
 -- verify a connection attempt was made to the intercepted node, this would have cause the
 -- connection to have been delayed and thus caused a timeout
-SELECT citus.dump_network_traffic();
+SELECT * FROM citus.dump_network_traffic() WHERE conn=0;
 SELECT citus.mitmproxy('conn.allow()');
View File
@@ -40,6 +40,9 @@ RETURNS fdw_handler
 AS 'citus'
 LANGUAGE C STRICT;
 CREATE FOREIGN DATA WRAPPER fake_fdw_1 HANDLER fake_fdw_handler;
+SELECT run_command_on_workers($$
+CREATE FOREIGN DATA WRAPPER fake_fdw_1 HANDLER fake_fdw_handler;
+$$);
 CREATE SERVER fake_fdw_server_1 FOREIGN DATA WRAPPER fake_fdw_1;
 CREATE FOREIGN TABLE foreign_table (
View File
@@ -106,4 +106,5 @@ SELECT master_copy_shard_placement(
 transfer_mode := 'block_writes');
 SET client_min_messages TO ERROR;
+DROP TABLE mcsp.history;
 DROP SCHEMA mcsp CASCADE;
View File
@@ -420,8 +420,30 @@ INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage) VALUES ('pg_dist
 ALTER EXTENSION citus UPDATE TO '11.0-1';
 DELETE FROM pg_dist_shard WHERE shardid = 1;
+-- partitioned table count is tracked on Citus 11 upgrade
+CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id);
+CREATE TABLE orders_2020_07_01
+PARTITION OF e_transactions FOR VALUES IN (1,2,3);
+INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', NULL, 7, 's');
+SELECT
+(metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,
+(metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null
+FROM
+pg_dist_node_metadata;
 -- Test downgrade to 10.2-4 from 11.0-1
 ALTER EXTENSION citus UPDATE TO '11.0-1';
+SELECT
+(metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,
+(metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null
+FROM
+pg_dist_node_metadata;
+DELETE FROM pg_dist_partition WHERE logicalrelid = 'e_transactions'::regclass;
+DROP TABLE e_transactions;
 ALTER EXTENSION citus UPDATE TO '10.2-4';
 -- Should be empty result since upgrade+downgrade should be a no-op
 SELECT * FROM multi_extension.print_extension_changes();
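The partitioned_citus_table_exists_pre_11 flag is stored as a JSON key in pg_dist_node_metadata, so it reads as NULL until an upgrade records it. One way to fold absence into false when inspecting it (an assumption about the intended semantics):

SELECT COALESCE((metadata->>'partitioned_citus_table_exists_pre_11')::boolean, false) AS had_pre_11_partitioned_tables
FROM pg_dist_node_metadata;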
View File
@@ -5,6 +5,9 @@
 -- Tests for metadata snapshot functions, metadata syncing functions and propagation of
 -- metadata changes to MX tables.
+-- Turn metadata sync off at first
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
 SET citus.replicate_reference_tables_on_activate TO off;
@@ -674,7 +677,9 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
 WHERE logicalrelid='mx_ref'::regclass;
 \c - - - :master_port
+SET client_min_messages TO ERROR;
 SELECT master_add_node('localhost', :worker_2_port);
+RESET client_min_messages;
 SELECT shardid, nodename, nodeport
 FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
@@ -859,3 +864,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id;
 ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
 ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
+-- Turn metadata sync back on at the end
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
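When a test brackets its body with stop_metadata_sync_to_node()/start_metadata_sync_to_node() like this, the current sync state can be confirmed from pg_dist_node; a quick check:

SELECT nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeport;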
View File
@@ -163,8 +163,9 @@ CREATE INDEX append_zero_shard_table_idx_123456789012345678901234567890123456789
 CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_lengths(col2);
 \c - - :public_worker_1_host :worker_1_port
+SET citus.override_table_visibility TO FALSE;
 SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
-relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
+relname SIMILAR TO 'tmp_idx_%\_\d{6}' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
 \c - - :master_host :master_port
 -- Verify that a new index name > 63 characters is auto-truncated
@@ -175,8 +176,9 @@ CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890123456789
 ALTER INDEX tmp_idx_123456789012345678901234567890123456789012345678901234567890 RENAME TO tmp_idx_newname_123456789012345678901234567890123456789012345678901234567890;
 \c - - :public_worker_1_host :worker_1_port
+SET citus.override_table_visibility TO FALSE;
 SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
-relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
+relname SIMILAR TO 'tmp_idx_%\_\d{6}' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
 \c - - :master_host :master_port
 SET citus.shard_count TO 2;
@@ -236,6 +238,7 @@ CREATE TABLE sneaky_name_lengths (
 SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
 \c - - :public_worker_1_host :worker_1_port
+SET citus.override_table_visibility TO FALSE;
 SELECT c1.relname AS unique_index_name
 FROM pg_class c1
@@ -263,6 +266,7 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
 SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
 \c - - :public_worker_1_host :worker_1_port
+SET citus.override_table_visibility TO FALSE;
 \dt *225000000000*
 \c - - :master_host :master_port
@@ -283,8 +287,9 @@ FROM pg_dist_shard
 WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass;
 \c - - :public_worker_1_host :worker_1_port
-\dt public.elephant_*
-\di public.elephant_*
+SET citus.override_table_visibility TO FALSE;
+\dt public.elephant_*[0-9]+
+\di public.elephant_*[0-9]+
 \c - - :master_host :master_port
 SET citus.shard_count TO 2;
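Shard-level relations carry a numeric shard-id suffix, which is what the tightened SIMILAR TO pattern matches, while the old LIKE pattern also caught the coordinator-level index. A standalone probe of the pattern (the literal is illustrative; it should return true for a six-digit suffix):

SELECT 'tmp_idx_newname_abc_102008' SIMILAR TO 'tmp_idx_%\_\d{6}';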
View File
@@ -47,20 +47,33 @@ SELECT * FROM seq_test_0_local_table ORDER BY 1, 2 LIMIT 5;
 -- in this case column z is of type int
 \d seq_0
 \d seq_0_local_table
--- cannot change the type of a sequence used in a distributed table
--- even if metadata is not synced to workers
+-- cannot alter a sequence used in a distributed table
+-- since the metadata is synced to workers
 ALTER SEQUENCE seq_0 AS bigint;
 ALTER SEQUENCE seq_0_local_table AS bigint;
 -- we can change other things like increment
 -- if metadata is not synced to workers
-ALTER SEQUENCE seq_0 INCREMENT BY 2;
-ALTER SEQUENCE seq_0_local_table INCREMENT BY 2;
-\d seq_0
-\d seq_0_local_table
+BEGIN;
+SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
+CREATE SEQUENCE seq_13;
+CREATE SEQUENCE seq_13_local_table;
+CREATE TABLE seq_test_13 (x int, y int);
+CREATE TABLE seq_test_13_local_table (x int, y int);
+SELECT create_distributed_table('seq_test_13','x');
+SELECT citus_add_local_table_to_metadata('seq_test_13_local_table');
+ALTER TABLE seq_test_13 ADD COLUMN z int DEFAULT nextval('seq_13');
+ALTER TABLE seq_test_13_local_table ADD COLUMN z int DEFAULT nextval('seq_13_local_table');
+ALTER SEQUENCE seq_13 INCREMENT BY 2;
+ALTER SEQUENCE seq_13_local_table INCREMENT BY 2;
+\d seq_13
+\d seq_13_local_table
 -- check that we can add serial pseudo-type columns
--- when metadata is not yet synced to workers
+-- when metadata is not synced to workers
 TRUNCATE seq_test_0;
 ALTER TABLE seq_test_0 ADD COLUMN w00 smallserial;
 ALTER TABLE seq_test_0 ADD COLUMN w01 serial2;
@@ -77,6 +90,8 @@ ALTER TABLE seq_test_0_local_table ADD COLUMN w11 serial4;
 ALTER TABLE seq_test_0_local_table ADD COLUMN w20 bigserial;
 ALTER TABLE seq_test_0_local_table ADD COLUMN w21 serial8;
+ROLLBACK;
 -- check alter column type precaution
 ALTER TABLE seq_test_0 ALTER COLUMN z TYPE bigint;
 ALTER TABLE seq_test_0 ALTER COLUMN z TYPE smallint;
@@ -468,6 +483,5 @@ DROP TABLE sequence_default.seq_test_7_par;
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA sequence_default CASCADE;
 SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default CASCADE');
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 SELECT master_remove_node('localhost', :master_port);
 SET search_path TO public;
View File
@@ -207,6 +207,7 @@ SELECT citus_move_shard_placement((SELECT * FROM selected_shard), 'localhost', :
 -- for the following test, ensure that 6 and 7 go to different shards on different workers
 SELECT count(DISTINCT nodeport) FROM pg_dist_shard_placement WHERE shardid IN (get_shard_id_for_distribution_column('test_2pcskip', 6),get_shard_id_for_distribution_column('test_2pcskip', 7));
+SELECT recover_prepared_transactions();
 -- only two of the connections will perform a write (INSERT)
 SET citus.force_max_query_parallelization TO ON;
 BEGIN;
View File
@@ -206,12 +206,15 @@ SELECT * FROM test_local_truncate;
 -- Undistribute table
 SELECT citus_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate');
+CREATE TABLE temp_pg_dist_partition_row AS SELECT * FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass;
 DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass;
 -- Ensure local data is truncated
 SELECT * FROM test_local_truncate;
+INSERT INTO pg_dist_partition SELECT * FROM temp_pg_dist_partition_row;
 DROP TABLE test_local_truncate;
+DROP TABLE temp_pg_dist_partition_row;
 -- Truncate local data, but roll back
 CREATE TABLE test_local_truncate (x int, y int);
View File
@@ -0,0 +1,83 @@
CREATE SCHEMA propagate_foreign_server;
SET search_path TO propagate_foreign_server;
-- remove node to add later
SELECT citus_remove_node('localhost', :worker_1_port);
-- create schema, extension and foreign server while the worker is removed
SET citus.enable_ddl_propagation TO OFF;
CREATE SCHEMA test_dependent_schema;
CREATE EXTENSION postgres_fdw WITH SCHEMA test_dependent_schema;
SET citus.enable_ddl_propagation TO ON;
CREATE SERVER foreign_server_dependent_schema
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'test');
SELECT 1 FROM citus_add_node('localhost', :worker_1_port);
-- verify the dependent schema and the foreign server are created on the newly added worker
SELECT run_command_on_workers(
$$SELECT COUNT(*) FROM pg_namespace WHERE nspname = 'test_dependent_schema';$$);
SELECT run_command_on_workers(
$$SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server_dependent_schema';$$);
CREATE SERVER foreign_server_to_drop
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'test');
--should error
DROP SERVER foreign_server_dependent_schema, foreign_server_to_drop;
SET client_min_messages TO ERROR;
DROP SCHEMA test_dependent_schema CASCADE;
RESET client_min_messages;
-- test propagating foreign server creation
CREATE EXTENSION postgres_fdw;
CREATE SERVER foreign_server TYPE 'test_type' VERSION 'v1'
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'testhost', port '5432', dbname 'testdb');
SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server';
-- verify that the server is created on the worker
SELECT run_command_on_workers(
$$SELECT COUNT(*)=1 FROM pg_foreign_server WHERE srvname = 'foreign_server';$$);
ALTER SERVER foreign_server OPTIONS (ADD "fdw_startup_cost" '1000');
ALTER SERVER foreign_server OPTIONS (ADD passfile 'to_be_dropped');
ALTER SERVER foreign_server OPTIONS (SET host 'localhost');
ALTER SERVER foreign_server OPTIONS (DROP port, DROP dbname);
ALTER SERVER foreign_server OPTIONS (ADD port :'master_port', dbname 'regression', DROP passfile);
ALTER SERVER foreign_server RENAME TO "foreign'server_1!";
-- test alter owner
SELECT rolname FROM pg_roles JOIN pg_foreign_server ON (pg_roles.oid=pg_foreign_server.srvowner) WHERE srvname = 'foreign''server_1!';
ALTER SERVER "foreign'server_1!" OWNER TO pg_monitor;
-- verify that the server is renamed on the worker
SELECT run_command_on_workers(
$$SELECT srvoptions FROM pg_foreign_server WHERE srvname = 'foreign''server_1!';$$);
-- verify the owner is changed
SELECT run_command_on_workers(
$$SELECT rolname FROM pg_roles WHERE oid IN (SELECT srvowner FROM pg_foreign_server WHERE srvname = 'foreign''server_1!');$$);
-- verify the owner is changed on the coordinator
SELECT rolname FROM pg_roles JOIN pg_foreign_server ON (pg_roles.oid=pg_foreign_server.srvowner) WHERE srvname = 'foreign''server_1!';
DROP SERVER IF EXISTS "foreign'server_1!" CASCADE;
-- verify that the server is dropped on the worker
SELECT run_command_on_workers(
$$SELECT COUNT(*)=0 FROM pg_foreign_server WHERE srvname = 'foreign''server_1!';$$);
\c - - - :worker_1_port
-- not allowed on the worker
ALTER SERVER foreign_server OPTIONS (ADD async_capable 'False');
CREATE SERVER foreign_server_1 TYPE 'test_type' VERSION 'v1'
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'testhost', port '5432', dbname 'testdb');
\c - - - :master_port
DROP SCHEMA propagate_foreign_server CASCADE;
View File
@@ -5,4 +5,4 @@ test: ch_benchmarks_4 ch_benchmarks_5 ch_benchmarks_6
 test: intermediate_result_pruning_queries_1 intermediate_result_pruning_queries_2
 test: dropped_columns_1 distributed_planning
 test: local_dist_join
-test: connectivity_checks
+test: connectivity_checks citus_run_command