Compare commits

...

16 Commits

Author SHA1 Message Date
Gürkan İndibay 56fdf0e80e
Bump Citus to 12.0.1 (#7506) 2024-02-14 08:40:45 +03:00
Gürkan İndibay 1dcd5ff046
Adds Changelog for v12.0.1 (#7498)
Co-authored-by: Onur Tirtir <onurcantirtir@gmail.com>
2024-02-13 17:09:57 +03:00
Teja Mupparti b3da549da9 Fix the incorrect column count after ALTER TABLE; this fixes bug #7378 (please read the analysis in the issue for more information)
(cherry picked from commit 00068e07c5)
2024-01-26 08:15:11 -08:00
Gokhan Gulbiz 0ef2e14b16
Backport GHA Migration to release-12.0 (#7314)
---------

Co-authored-by: Jelte Fennema-Nio <jelte.fennema@microsoft.com>
Co-authored-by: aykut-bozkurt <51649454+aykut-bozkurt@users.noreply.github.com>
2023-11-10 09:35:32 +00:00
Onur Tirtir 03d11bbebd Make sure to disallow creating a replicated distributed table concurrently (#7219)
See explanation in https://github.com/citusdata/citus/issues/7216.
Fixes https://github.com/citusdata/citus/issues/7216.

DESCRIPTION: Makes sure to disallow creating a replicated distributed
table concurrently

(cherry picked from commit 111b4c19bc)
2023-10-24 14:04:36 +03:00
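
For illustration, a hedged C sketch of the kind of guard this change describes; ShardReplicationFactor backs the citus.shard_replication_factor GUC in Citus, but the function name and error wording below are assumptions, not the actual patch.

#include "postgres.h"

/* backs the citus.shard_replication_factor GUC */
extern int ShardReplicationFactor;

static void
ErrorIfConcurrentCreationIsReplicated(void)
{
    /* a replication factor above 1 means a replicated distributed table */
    if (ShardReplicationFactor > 1)
    {
        ereport(ERROR,
                (errmsg("cannot use create_distributed_table_concurrently "
                        "when citus.shard_replication_factor is greater than 1"),
                 errhint("Set citus.shard_replication_factor to 1 or use "
                         "create_distributed_table instead.")));
    }
}
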
Nils Dijk 350db42577
Fix leaking of memory and memory contexts in Foreign Constraint Graphs (#7236)
DESCRIPTION: Fix leaking of memory and memory contexts in Foreign
Constraint Graphs

Previously, every time we (re)created the Foreign Constraint
Relationship Graph, we created a new Memory Context while losing the
reference to the previous context. That old context could still hold
left-over memory, causing a memory leak.

With this patch we keep a single memory context in a static variable and
lazily initialize it the first time we create the foreign constraint
relationship graph. On every subsequent creation, besides destroying the
previous hashmap, we also reset that memory context to remove any
left-over references.
2023-10-09 13:07:47 +02:00
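
For illustration, a minimal C sketch of the pattern described in the commit above, using PostgreSQL's MemoryContext API; the context name and the surrounding function are assumptions for the sketch, not the actual Citus symbols.

#include "postgres.h"
#include "utils/memutils.h"

/* static pointer so the context survives across graph rebuilds */
static MemoryContext ForeignConstraintGraphContext = NULL;

static void
RebuildForeignConstraintGraph(void)
{
    if (ForeignConstraintGraphContext == NULL)
    {
        /* lazily create the context once, parented to a long-lived context */
        ForeignConstraintGraphContext =
            AllocSetContextCreate(CacheMemoryContext,
                                  "Foreign Constraint Graph Context",
                                  ALLOCSET_DEFAULT_SIZES);
    }
    else
    {
        /* instead of leaking the old context, free everything in it */
        MemoryContextReset(ForeignConstraintGraphContext);
    }

    MemoryContext oldContext =
        MemoryContextSwitchTo(ForeignConstraintGraphContext);

    /* ... allocate the hashmap and graph nodes here ... */

    MemoryContextSwitchTo(oldContext);
}
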
Gürkan İndibay 72b62ea0d1 Removes ubuntu:kinetic pipelines since it's EOL (#7195)
ubuntu:kinetic is EOL, so this removes its pipeline.

https://fridge.ubuntu.com/2023/06/14/ubuntu-22-10-kinetic-kudu-reaches-end-of-life-on-july-20-2023/
(cherry picked from commit e0683aab84)
2023-09-26 16:54:37 +03:00
Gürkan İndibay 7a22dd581a Removes pg_send_cancellation (#7135)
DESCRIPTION: Removes pg_send_cancellation and all references
(cherry picked from commit 371f094b68)
2023-09-26 16:54:37 +03:00
Hanefi Onaldi 2105c20344
Create a new colocation properly after breaking one
When breaking colocation, we need to create a new colocation group
record in pg_dist_colocation for the relation. It is not sufficient to
only assign a new colocationid value in pg_dist_partition.

This patch also fixes a bug when deleting a colocation group if no
tables are left in it. Previously we passed a relation id as a parameter
to the DeleteColocationGroupIfNoTablesBelong function, where we should
have passed a colocation id.

(cherry picked from commit c22547d221)
2023-09-05 11:44:22 +03:00
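
For illustration, a hedged C sketch of the parameter fix mentioned above; the declarations mirror the names in the commit message but may not match the actual Citus signatures.

#include "postgres.h"

/* hypothetical declarations following the commit message */
extern uint32 TableColocationId(Oid relationId);
extern void DeleteColocationGroupIfNoTablesBelong(uint32 colocationId);

static void
DropColocationGroupIfEmpty(Oid relationId)
{
    /*
     * Before the fix the relation id itself was passed below, so the
     * "no tables left in this colocation group" check never matched.
     * The fix is to resolve the relation's colocation id first.
     */
    uint32 colocationId = TableColocationId(relationId);

    DeleteColocationGroupIfNoTablesBelong(colocationId);
}
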
Naisila Puka db8e12f418 Disable statistics collection (#7162)
Enabled by mistake in

ba40eb363c
(cherry picked from commit a17fae36b9)
2023-08-29 16:13:32 +03:00
Naisila Puka ff268fb621 Changes PROCESS_TOAST default value to true (#7122)
PROCESS_TOAST should be true by default, as it is in PostgreSQL.

(cherry picked from commit b982f2dee6)
2023-08-29 16:13:21 +03:00
zhjwpku 0ae05018f1 PQputCopyData's return value 0 should be considered a failure (#7152) 2023-08-29 11:20:25 +02:00
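
For illustration, a minimal C sketch of the libpq behavior behind this fix: PQputCopyData returns 1 when the data is queued, 0 when it could not be queued (possible in nonblocking mode), and -1 on error, so only 1 may be treated as success. The wrapper below is a hedged example, not the Citus code path.

#include <stdbool.h>
#include <libpq-fe.h>

static bool
PutCopyDataChecked(PGconn *connection, const char *data, int dataLength)
{
    int copyResult = PQputCopyData(connection, data, dataLength);

    /*
     * Treating only -1 as failure can silently drop data; a return value
     * of 0 must also be handled as a failure (or retried).
     */
    return copyResult == 1;
}
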
Onur Tirtir 7d24ed0d8b Makes sure to handle NULL constraints for ADD COLUMN commands (#7093)
DESCRIPTION: Fixes a bug that causes an unexpected error when adding a
column with a NULL constraint

Fixes https://github.com/citusdata/citus/issues/7092.

(cherry picked from commit dd6ea1ebd5)
2023-08-14 10:51:53 +03:00
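
For illustration, a minimal C sketch of the missing case this commit describes: an explicit NULL column "constraint" arrives as a Constraint node with contype CONSTR_NULL and must be treated as a harmless no-op while deparsing the ADD COLUMN command, instead of falling through to an unexpected-error path. The function name and structure are assumptions, not the actual Citus deparser.

#include "postgres.h"
#include "lib/stringinfo.h"
#include "nodes/parsenodes.h"

/* illustrative helper: append the column-constraint keyword while deparsing */
static void
AppendColumnConstraint(StringInfo buf, Constraint *constraint)
{
    switch (constraint->contype)
    {
        case CONSTR_NOTNULL:
            appendStringInfoString(buf, " NOT NULL");
            break;

        case CONSTR_NULL:
            /* an explicit NULL constraint changes nothing, but must not error */
            appendStringInfoString(buf, " NULL");
            break;

        default:
            /* other constraint kinds are handled elsewhere */
            break;
    }
}
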
Halil Ozan Akgül b729a2b519
Add 11.3-2 backporting changes (#7062)
This PR moves the `citus_shard_sizes` changes from #7003 and #7018 to a new
Citus version, 11.3-2.

This PR backports the changes to the 12.0 release branch; there is another
PR, #7051, for the 11.3 release branch, and one, #7050, for the main branch.
2023-07-14 17:19:45 +03:00
aykut-bozkurt 19ef03ab72
Changelog entries for 12.0.0 (#7049)
Co-authored-by: Onur Tirtir <onurcantirtir@gmail.com>
Co-authored-by: Gokhan Gulbiz <ggulbiz@gmail.com>
(cherry picked from commit ee255cd46e)
2023-07-14 00:22:32 +03:00
aykut-bozkurt b3e6c036dd
Bump Citus version to 12.0.0 2023-07-14 00:22:25 +03:00
53 changed files with 1129 additions and 1741 deletions

View File

@ -1,991 +0,0 @@
version: 2.1
orbs:
codecov: codecov/codecov@1.1.1
azure-cli: circleci/azure-cli@1.0.0
parameters:
image_suffix:
type: string
default: '-vbab548a'
pg14_version:
type: string
default: '14.8'
pg15_version:
type: string
default: '15.3'
upgrade_pg_versions:
type: string
default: '14.8-15.3'
style_checker_tools_version:
type: string
default: '0.8.18'
flaky_test:
type: string
default: ''
flaky_test_runs_per_job:
type: integer
default: 50
skip_flaky_tests:
type: boolean
default: false
commands:
install_extension:
parameters:
pg_major:
description: 'postgres major version to use'
type: integer
steps:
- run:
name: 'Install Extension'
command: |
tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-<< parameters.pg_major >>.tar" --directory /
configure:
steps:
- run:
name: 'Configure'
command: |
chown -R circleci .
gosu circleci ./configure --without-pg-version-check
enable_core:
steps:
- run:
name: 'Enable core dumps'
command: |
ulimit -c unlimited
save_regressions:
steps:
- run:
name: 'Regressions'
command: |
if [ -f "src/test/regress/regression.diffs" ]; then
cat src/test/regress/regression.diffs
exit 1
fi
when: on_fail
- store_artifacts:
name: 'Save regressions'
path: src/test/regress/regression.diffs
save_logs_and_results:
steps:
- store_artifacts:
name: 'Save mitmproxy output (failure test specific)'
path: src/test/regress/proxy.output
- store_artifacts:
name: 'Save results'
path: src/test/regress/results/
- store_artifacts:
name: 'Save coordinator log'
path: src/test/regress/tmp_check/master/log
- store_artifacts:
name: 'Save worker1 log'
path: src/test/regress/tmp_check/worker.57637/log
- store_artifacts:
name: 'Save worker2 log'
path: src/test/regress/tmp_check/worker.57638/log
stack_trace:
steps:
- run:
name: 'Print stack traces'
command: |
./ci/print_stack_trace.sh
when: on_fail
coverage:
parameters:
flags:
description: 'codecov flags'
type: string
steps:
- codecov/upload:
flags: '<< parameters.flags >>'
- run:
name: 'Create codeclimate coverage'
command: |
lcov --directory . --capture --output-file lcov.info
lcov --remove lcov.info -o lcov.info '/usr/*'
sed "s=^SF:$PWD/=SF:=g" -i lcov.info # relative paths are required by codeclimate
mkdir -p /tmp/codeclimate
# We started getting permission errors. This fixes them, and since
# we're not on a multi-user system, this is safe to do.
git config --global --add safe.directory /home/circleci/project
cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/$CIRCLE_JOB.json lcov.info
- persist_to_workspace:
root: /tmp
paths:
- codeclimate/*.json
jobs:
build:
description: Build the citus extension
parameters:
pg_major:
description: postgres major version to build citus for
type: integer
image:
description: docker image to use for the build
type: string
default: citus/extbuilder
image_tag:
description: tag to use for the docker image
type: string
docker:
- image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
steps:
- checkout
- run:
name: 'Configure, Build, and Install'
command: |
./ci/build-citus.sh
- persist_to_workspace:
root: .
paths:
- build-<< parameters.pg_major >>/*
- install-<<parameters.pg_major >>.tar
check-style:
docker:
- image: 'citus/stylechecker:<< pipeline.parameters.style_checker_tools_version >><< pipeline.parameters.image_suffix >>'
steps:
- checkout
- run:
name: 'Check C Style'
command: citus_indent --check
- run:
name: 'Check Python style'
command: black --check .
- run:
name: 'Check Python import order'
command: isort --check .
- run:
name: 'Check Python lints'
command: flake8 .
- run:
name: 'Fix whitespace'
command: ci/editorconfig.sh && git diff --exit-code
- run:
name: 'Remove useless declarations'
command: ci/remove_useless_declarations.sh && git diff --cached --exit-code
- run:
name: 'Normalize test output'
command: ci/normalize_expected.sh && git diff --exit-code
- run:
name: 'Check for C-style comments in migration files'
command: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code
- run:
name: 'Check for comments that start with # character in spec files'
command: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code
- run:
name: 'Check for gitignore entries for source files'
command: ci/fix_gitignore.sh && git diff --exit-code
- run:
name: 'Check for lengths of changelog entries'
command: ci/disallow_long_changelog_entries.sh
- run:
name: 'Check for banned C API usage'
command: ci/banned.h.sh
- run:
name: 'Check for tests missing in schedules'
command: ci/check_all_tests_are_run.sh
- run:
name: 'Check if all CI scripts are actually run'
command: ci/check_all_ci_scripts_are_run.sh
- run:
name: 'Check if all GUCs are sorted alphabetically'
command: ci/check_gucs_are_alphabetically_sorted.sh
- run:
name: 'Check for missing downgrade scripts'
command: ci/check_migration_files.sh
check-sql-snapshots:
docker:
- image: 'citus/extbuilder:latest'
steps:
- checkout
- run:
name: 'Check Snapshots'
command: ci/check_sql_snapshots.sh
test-pg-upgrade:
description: Runs postgres upgrade tests
parameters:
old_pg_major:
description: 'postgres major version to use before the upgrade'
type: integer
new_pg_major:
description: 'postgres major version to upgrade to'
type: integer
image:
description: 'docker image to use for the tests'
type: string
default: citus/pgupgradetester
image_tag:
description: 'docker image tag to use'
type: string
docker:
- image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- install_extension:
pg_major: << parameters.old_pg_major >>
- install_extension:
pg_major: << parameters.new_pg_major >>
- configure
- enable_core
- run:
name: 'Install and test postgres upgrade'
command: |
gosu circleci \
make -C src/test/regress \
check-pg-upgrade \
old-bindir=/usr/lib/postgresql/<< parameters.old_pg_major >>/bin \
new-bindir=/usr/lib/postgresql/<< parameters.new_pg_major >>/bin
no_output_timeout: 2m
- run:
name: 'Copy pg_upgrade logs for newData dir'
command: |
mkdir -p /tmp/pg_upgrade_newData_logs
if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then
cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs
fi
when: on_fail
- store_artifacts:
name: 'Save pg_upgrade logs for newData dir'
path: /tmp/pg_upgrade_newData_logs
- save_logs_and_results
- save_regressions
- stack_trace
- coverage:
flags: 'test_<< parameters.old_pg_major >>_<< parameters.new_pg_major >>,upgrade'
test-pytest:
description: Runs pytest based tests
parameters:
pg_major:
description: 'postgres major version'
type: integer
image:
description: 'docker image to use for the tests'
type: string
default: citus/failtester
image_tag:
description: 'docker image tag to use'
type: string
docker:
- image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- install_extension:
pg_major: << parameters.pg_major >>
- configure
- enable_core
- run:
name: 'Run pytest'
command: |
gosu circleci \
make -C src/test/regress check-pytest
no_output_timeout: 2m
- stack_trace
- coverage:
flags: 'test_<< parameters.pg_major >>,pytest'
test-arbitrary-configs:
description: Runs tests on arbitrary configs
parallelism: 6
parameters:
pg_major:
description: 'postgres major version to use'
type: integer
image:
description: 'docker image to use for the tests'
type: string
default: citus/failtester
image_tag:
description: 'docker image tag to use'
type: string
docker:
- image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
resource_class: xlarge
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- install_extension:
pg_major: << parameters.pg_major >>
- configure
- enable_core
- run:
name: 'Test arbitrary configs'
command: |
TESTS=$(src/test/regress/citus_tests/print_test_names.py | circleci tests split)
# Our test suite expects comma separated values
TESTS=$(echo $TESTS | tr ' ' ',')
# TESTS will contain the subset of configs that will run on this container;
# we use multiple containers to run the whole test suite
gosu circleci \
make -C src/test/regress \
check-arbitrary-configs parallel=4 CONFIGS=$TESTS
no_output_timeout: 2m
- run:
name: 'Show regressions'
command: |
find src/test/regress/tmp_citus_test/ -name "regression*.diffs" -exec cat {} +
lines=$(find src/test/regress/tmp_citus_test/ -name "regression*.diffs" | wc -l)
if [ $lines -ne 0 ]; then
exit 1
fi
when: on_fail
- run:
name: 'Copy logfiles'
command: |
mkdir src/test/regress/tmp_citus_test/logfiles
find src/test/regress/tmp_citus_test/ -name "logfile_*" -exec cp -t src/test/regress/tmp_citus_test/logfiles/ {} +
when: on_fail
- store_artifacts:
name: 'Save logfiles'
path: src/test/regress/tmp_citus_test/logfiles
- save_logs_and_results
- stack_trace
- coverage:
flags: 'test_<< parameters.pg_major >>,upgrade'
test-citus-upgrade:
description: Runs citus upgrade tests
parameters:
pg_major:
description: 'postgres major version'
type: integer
image:
description: 'docker image to use for the tests'
type: string
default: citus/citusupgradetester
image_tag:
description: 'docker image tag to use'
type: string
docker:
- image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- configure
- enable_core
- run:
name: 'Install and test citus upgrade'
command: |
# run make check-citus-upgrade for all citus versions
# the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
for citus_version in ${CITUS_VERSIONS}; do \
gosu circleci \
make -C src/test/regress \
check-citus-upgrade \
bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
citus-old-version=${citus_version} \
citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \
done;
# run make check-citus-upgrade-mixed for all citus versions
# the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
for citus_version in ${CITUS_VERSIONS}; do \
gosu circleci \
make -C src/test/regress \
check-citus-upgrade-mixed \
citus-old-version=${citus_version} \
bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \
done;
no_output_timeout: 2m
- save_logs_and_results
- save_regressions
- stack_trace
- coverage:
flags: 'test_<< parameters.pg_major >>,upgrade'
test-query-generator:
description: Expects the generated queries run on distributed and local tables to have the same results
parameters:
pg_major:
description: 'postgres major version'
type: integer
image:
description: 'docker image to use for the tests'
type: string
default: citus/failtester
image_tag:
description: 'docker image tag to use'
type: string
docker:
- image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- install_extension:
pg_major: << parameters.pg_major >>
- configure
- enable_core
- run:
name: 'Run Test'
command: |
gosu circleci make -C src/test/regress check-query-generator
no_output_timeout: 5m
- run:
name: 'Show regressions'
command: |
find src/test/regress/citus_tests/query_generator/out/ -name "local_dist.diffs" -exec cat {} +
lines=$(find src/test/regress/citus_tests/query_generator/out/ -name "local_dist.diffs" | wc -l)
if [ $lines -ne 0 ]; then
exit 1
fi
when: on_fail
- run:
name: 'Copy logfiles'
command: |
mkdir src/test/regress/tmp_citus_test/logfiles
find src/test/regress/tmp_citus_test/ -name "logfile_*" -exec cp -t src/test/regress/tmp_citus_test/logfiles/ {} +
when: on_fail
- store_artifacts:
name: 'Save logfiles'
path: src/test/regress/tmp_citus_test/logfiles
- store_artifacts:
name: 'Save ddls'
path: src/test/regress/citus_tests/query_generator/out/ddls.sql
- store_artifacts:
name: 'Save dmls'
path: src/test/regress/citus_tests/query_generator/out/queries.sql
- store_artifacts:
name: 'Save diffs'
path: src/test/regress/citus_tests/query_generator/out/local_dist.diffs
- stack_trace
- coverage:
flags: 'test_<< parameters.pg_major >>,querygen'
test-citus:
description: Runs the common tests of citus
parameters:
pg_major:
description: 'postgres major version'
type: integer
image:
description: 'docker image to use for the tests'
type: string
default: citus/exttester
image_tag:
description: 'docker image tag to use'
type: string
make:
description: 'make target'
type: string
docker:
- image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- install_extension:
pg_major: << parameters.pg_major >>
- configure
- enable_core
- run:
name: 'Run Test'
command: |
gosu circleci make -C src/test/regress << parameters.make >>
no_output_timeout: 2m
- save_logs_and_results
- save_regressions
- stack_trace
- coverage:
flags: 'test_<< parameters.pg_major >>,<< parameters.make >>'
tap-test-citus:
description: Runs tap tests for citus
parameters:
pg_major:
description: 'postgres major version'
type: integer
image:
description: 'docker image to use for the tests'
type: string
default: citus/exttester
image_tag:
description: 'docker image tag to use'
type: string
suite:
description: 'name of the tap test suite to run'
type: string
make:
description: 'make target'
type: string
default: installcheck
docker:
- image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
working_directory: /home/circleci/project
steps:
- checkout
- attach_workspace:
at: .
- install_extension:
pg_major: << parameters.pg_major >>
- configure
- enable_core
- run:
name: 'Run Test'
command: |
gosu circleci make -C src/test/<< parameters.suite >> << parameters.make >>
no_output_timeout: 2m
- store_artifacts:
name: 'Save tap logs'
path: /home/circleci/project/src/test/<< parameters.suite >>/tmp_check/log
- save_logs_and_results
- stack_trace
- coverage:
flags: 'test_<< parameters.pg_major >>,tap_<< parameters.suite >>_<< parameters.make >>'
check-merge-to-enterprise:
docker:
- image: citus/extbuilder:<< pipeline.parameters.pg14_version >>
working_directory: /home/circleci/project
steps:
- checkout
- run:
command: |
ci/check_enterprise_merge.sh
ch_benchmark:
docker:
- image: buildpack-deps:stretch
working_directory: /home/circleci/project
steps:
- checkout
- azure-cli/install
- azure-cli/login-with-service-principal
- run:
command: |
cd ./src/test/hammerdb
sh run_hammerdb.sh citusbot_ch_benchmark_rg
name: install dependencies and run ch_benchmark tests
no_output_timeout: 20m
tpcc_benchmark:
docker:
- image: buildpack-deps:stretch
working_directory: /home/circleci/project
steps:
- checkout
- azure-cli/install
- azure-cli/login-with-service-principal
- run:
command: |
cd ./src/test/hammerdb
sh run_hammerdb.sh citusbot_tpcc_benchmark_rg
name: install dependencies and run tpcc_benchmark tests
no_output_timeout: 20m
test-flakyness:
description: Runs a test multiple times to see if it's flaky
parallelism: 32
parameters:
pg_major:
description: 'postgres major version'
type: integer
image:
description: 'docker image to use for the tests'
type: string
default: citus/failtester
image_tag:
description: 'docker image tag to use'
type: string
test:
description: 'the test file path that should be run multiple times'
type: string
default: ''
runs:
description: 'number of times that the test should be run in total'
type: integer
default: 8
skip:
description: 'A flag to bypass flaky test detection.'
type: boolean
default: false
docker:
- image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>'
working_directory: /home/circleci/project
resource_class: small
steps:
- checkout
- attach_workspace:
at: .
- run:
name: 'Detect regression tests that need to be run'
command: |
skip=<< parameters.skip >>
if [ "$skip" = true ]; then
echo "Skipping flaky test detection."
circleci-agent step halt
fi
testForDebugging="<< parameters.test >>"
if [ -z "$testForDebugging" ]; then
detected_changes=$(git diff origin/main... --name-only --diff-filter=AM | (grep 'src/test/regress/sql/.*\.sql\|src/test/regress/spec/.*\.spec\|src/test/regress/citus_tests/test/test_.*\.py' || true))
tests=${detected_changes}
else
tests=$testForDebugging;
fi
if [ -z "$tests" ]; then
echo "No test found."
circleci-agent step halt
else
echo "Detected tests " $tests
fi
echo export tests=\""$tests"\" >> "$BASH_ENV"
source "$BASH_ENV"
- install_extension:
pg_major: << parameters.pg_major >>
- configure
- enable_core
- run:
name: 'Run minimal tests'
command: |
tests_array=($tests)
for test in "${tests_array[@]}"
do
test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/")
gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat << parameters.runs >> --use-base-schedule --use-whole-schedule-line
done
no_output_timeout: 2m
- save_logs_and_results
- save_regressions
- stack_trace
upload-coverage:
docker:
- image: 'citus/exttester:<< pipeline.parameters.pg15_version >><< pipeline.parameters.image_suffix >>'
working_directory: /home/circleci/project
steps:
- attach_workspace:
at: .
- run:
name: Upload coverage results to Code Climate
command: |
cc-test-reporter sum-coverage codeclimate/*.json -o total.json
cc-test-reporter upload-coverage -i total.json
workflows:
version: 2
flaky_test_debugging:
when: << pipeline.parameters.flaky_test >>
jobs:
- build:
name: build-flaky-15
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
- test-flakyness:
name: 'test-15_flaky'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-flaky-15]
test: '<< pipeline.parameters.flaky_test >>'
runs: << pipeline.parameters.flaky_test_runs_per_job >>
build_and_test:
when:
not: << pipeline.parameters.flaky_test >>
jobs:
- build:
name: build-14
pg_major: 14
image_tag: '<< pipeline.parameters.pg14_version >>'
- build:
name: build-15
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
- check-style
- check-sql-snapshots
- test-citus: &test-citus-14
name: 'test-14_check-split'
make: check-split
pg_major: 14
image_tag: '<< pipeline.parameters.pg14_version >>'
requires: [build-14]
- test-citus:
<<: *test-citus-14
name: 'test-14_check-enterprise'
make: check-enterprise
- test-citus:
<<: *test-citus-14
name: 'test-14_check-enterprise-isolation'
make: check-enterprise-isolation
- test-citus:
<<: *test-citus-14
name: 'test-14_check-enterprise-isolation-logicalrep-1'
make: check-enterprise-isolation-logicalrep-1
- test-citus:
<<: *test-citus-14
name: 'test-14_check-enterprise-isolation-logicalrep-2'
make: check-enterprise-isolation-logicalrep-2
- test-citus:
<<: *test-citus-14
name: 'test-14_check-enterprise-isolation-logicalrep-3'
make: check-enterprise-isolation-logicalrep-3
- test-citus:
<<: *test-citus-14
name: 'test-14_check-enterprise-failure'
image: citus/failtester
make: check-enterprise-failure
- test-citus:
<<: *test-citus-14
name: 'test-14_check-multi'
make: check-multi
- test-citus:
<<: *test-citus-14
name: 'test-14_check-multi-1'
make: check-multi-1
- test-citus:
<<: *test-citus-14
name: 'test-14_check-mx'
make: check-multi-mx
- test-citus:
<<: *test-citus-14
name: 'test-14_check-vanilla'
make: check-vanilla
- test-citus:
<<: *test-citus-14
name: 'test-14_check-isolation'
make: check-isolation
- test-citus:
<<: *test-citus-14
name: 'test-14_check-operations'
make: check-operations
- test-citus:
<<: *test-citus-14
name: 'test-14_check-follower-cluster'
make: check-follower-cluster
- test-citus:
<<: *test-citus-14
name: 'test-14_check-columnar'
make: check-columnar
- test-citus:
<<: *test-citus-14
name: 'test-14_check-columnar-isolation'
make: check-columnar-isolation
- test-citus:
<<: *test-citus-14
name: 'test-14_check-failure'
image: citus/failtester
make: check-failure
- test-citus: &test-citus-15
name: 'test-15_check-split'
make: check-split
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
- test-citus:
<<: *test-citus-15
name: 'test-15_check-enterprise'
make: check-enterprise
- test-citus:
<<: *test-citus-15
name: 'test-15_check-enterprise-isolation'
make: check-enterprise-isolation
- test-citus:
<<: *test-citus-15
name: 'test-15_check-enterprise-isolation-logicalrep-1'
make: check-enterprise-isolation-logicalrep-1
- test-citus:
<<: *test-citus-15
name: 'test-15_check-enterprise-isolation-logicalrep-2'
make: check-enterprise-isolation-logicalrep-2
- test-citus:
<<: *test-citus-15
name: 'test-15_check-enterprise-isolation-logicalrep-3'
make: check-enterprise-isolation-logicalrep-3
- test-citus:
<<: *test-citus-15
name: 'test-15_check-enterprise-failure'
image: citus/failtester
make: check-enterprise-failure
- test-citus:
<<: *test-citus-15
name: 'test-15_check-multi'
make: check-multi
- test-citus:
<<: *test-citus-15
name: 'test-15_check-multi-1'
make: check-multi-1
- test-citus:
<<: *test-citus-15
name: 'test-15_check-mx'
make: check-multi-mx
- test-citus:
<<: *test-citus-15
name: 'test-15_check-vanilla'
make: check-vanilla
- test-citus:
<<: *test-citus-15
name: 'test-15_check-isolation'
make: check-isolation
- test-citus:
<<: *test-citus-15
name: 'test-15_check-operations'
make: check-operations
- test-citus:
<<: *test-citus-15
name: 'test-15_check-follower-cluster'
make: check-follower-cluster
- test-citus:
<<: *test-citus-15
name: 'test-15_check-columnar'
make: check-columnar
- test-citus:
<<: *test-citus-15
name: 'test-15_check-columnar-isolation'
make: check-columnar-isolation
- test-citus:
<<: *test-citus-15
name: 'test-15_check-failure'
image: citus/failtester
make: check-failure
- test-pytest:
name: 'test-14_pytest'
pg_major: 14
image_tag: '<< pipeline.parameters.pg14_version >>'
requires: [build-14]
- test-pytest:
name: 'test-15_pytest'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
- tap-test-citus:
name: 'test-15_tap-cdc'
suite: cdc
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
- test-arbitrary-configs:
name: 'test-14_check-arbitrary-configs'
pg_major: 14
image_tag: '<< pipeline.parameters.pg14_version >>'
requires: [build-14]
- test-arbitrary-configs:
name: 'test-15_check-arbitrary-configs'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
- test-query-generator:
name: 'test-14_check-query-generator'
pg_major: 14
image_tag: '<< pipeline.parameters.pg14_version >>'
requires: [build-14]
- test-query-generator:
name: 'test-15_check-query-generator'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
- test-pg-upgrade:
name: 'test-14-15_check-pg-upgrade'
old_pg_major: 14
new_pg_major: 15
image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
requires: [build-14, build-15]
- test-citus-upgrade:
name: test-14_check-citus-upgrade
pg_major: 14
image_tag: '<< pipeline.parameters.pg14_version >>'
requires: [build-14]
- upload-coverage:
requires:
- test-14_check-multi
- test-14_check-multi-1
- test-14_check-mx
- test-14_check-vanilla
- test-14_check-isolation
- test-14_check-operations
- test-14_check-follower-cluster
- test-14_check-columnar
- test-14_check-columnar-isolation
- test-14_check-failure
- test-14_check-enterprise
- test-14_check-enterprise-isolation
- test-14_check-enterprise-isolation-logicalrep-1
- test-14_check-enterprise-isolation-logicalrep-2
- test-14_check-enterprise-isolation-logicalrep-3
- test-14_check-enterprise-failure
- test-14_check-split
- test-14_check-arbitrary-configs
- test-14_check-query-generator
- test-15_check-multi
- test-15_check-multi-1
- test-15_check-mx
- test-15_check-vanilla
- test-15_check-isolation
- test-15_check-operations
- test-15_check-follower-cluster
- test-15_check-columnar
- test-15_check-columnar-isolation
- test-15_check-failure
- test-15_check-enterprise
- test-15_check-enterprise-isolation
- test-15_check-enterprise-isolation-logicalrep-1
- test-15_check-enterprise-isolation-logicalrep-2
- test-15_check-enterprise-isolation-logicalrep-3
- test-15_check-enterprise-failure
- test-15_check-split
- test-15_check-arbitrary-configs
- test-15_check-query-generator
- test-14-15_check-pg-upgrade
- test-14_check-citus-upgrade
- ch_benchmark:
requires: [build-14]
filters:
branches:
only:
- /ch_benchmark\/.*/ # match with ch_benchmark/ prefix
- tpcc_benchmark:
requires: [build-14]
filters:
branches:
only:
- /tpcc_benchmark\/.*/ # match with tpcc_benchmark/ prefix
- test-flakyness:
name: 'test-15_flaky'
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
skip: << pipeline.parameters.skip_flaky_tests >>

View File

@ -0,0 +1,23 @@
name: 'Parallelization matrix'
inputs:
count:
required: false
default: 32
outputs:
json:
value: ${{ steps.generate_matrix.outputs.json }}
runs:
using: "composite"
steps:
- name: Generate parallelization matrix
id: generate_matrix
shell: bash
run: |-
json_array="{\"include\": ["
for ((i = 1; i <= ${{ inputs.count }}; i++)); do
json_array+="{\"id\":\"$i\"},"
done
json_array=${json_array%,}
json_array+=" ]}"
echo "json=$json_array" >> "$GITHUB_OUTPUT"
echo "json=$json_array"

View File

@ -0,0 +1,38 @@
name: save_logs_and_results
inputs:
folder:
required: false
default: "log"
runs:
using: composite
steps:
- uses: actions/upload-artifact@v3.1.1
name: Upload logs
with:
name: ${{ inputs.folder }}
if-no-files-found: ignore
path: |
src/test/**/proxy.output
src/test/**/results/
src/test/**/tmp_check/master/log
src/test/**/tmp_check/worker.57638/log
src/test/**/tmp_check/worker.57637/log
src/test/**/*.diffs
src/test/**/out/ddls.sql
src/test/**/out/queries.sql
src/test/**/logfile_*
/tmp/pg_upgrade_newData_logs
- name: Publish regression.diffs
run: |-
diffs="$(find src/test/regress -name "*.diffs" -exec cat {} \;)"
if ! [ -z "$diffs" ]; then
echo '```diff' >> $GITHUB_STEP_SUMMARY
echo -E "$diffs" >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
echo -E $diffs
fi
shell: bash
- name: Print stack traces
run: "./ci/print_stack_trace.sh"
if: failure()
shell: bash

View File

@ -0,0 +1,35 @@
name: setup_extension
inputs:
pg_major:
required: false
skip_installation:
required: false
default: false
type: boolean
runs:
using: composite
steps:
- name: Expose $PG_MAJOR to Github Env
run: |-
if [ -z "${{ inputs.pg_major }}" ]; then
echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
else
echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
fi
shell: bash
- uses: actions/download-artifact@v3.0.1
with:
name: build-${{ env.PG_MAJOR }}
- name: Install Extension
if: ${{ inputs.skip_installation == 'false' }}
run: tar xfv "install-$PG_MAJOR.tar" --directory /
shell: bash
- name: Configure
run: |-
chown -R circleci .
git config --global --add safe.directory ${GITHUB_WORKSPACE}
gosu circleci ./configure --without-pg-version-check
shell: bash
- name: Enable core dumps
run: ulimit -c unlimited
shell: bash

View File

@ -0,0 +1,27 @@
name: coverage
inputs:
flags:
required: false
codecov_token:
required: true
runs:
using: composite
steps:
- uses: codecov/codecov-action@v3
with:
flags: ${{ inputs.flags }}
token: ${{ inputs.codecov_token }}
verbose: true
gcov: true
- name: Create codeclimate coverage
run: |-
lcov --directory . --capture --output-file lcov.info
lcov --remove lcov.info -o lcov.info '/usr/*'
sed "s=^SF:$PWD/=SF:=g" -i lcov.info # relative paths are required by codeclimate
mkdir -p /tmp/codeclimate
cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
shell: bash
- uses: actions/upload-artifact@v3.1.1
with:
path: "/tmp/codeclimate/*.json"
name: codeclimate

480
.github/workflows/build_and_test.yml vendored Normal file
View File

@ -0,0 +1,480 @@
name: Build & Test
run-name: Build & Test - ${{ github.event.pull_request.title || github.ref_name }}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
inputs:
skip_test_flakyness:
required: false
default: false
type: boolean
pull_request:
types: [opened, reopened, synchronize]
jobs:
# Since GHA does not interpolate env variables in the matrix context, we need
# to define them in a separate job and use them in other jobs.
params:
runs-on: ubuntu-latest
name: Initialize parameters
outputs:
build_image_name: "citus/extbuilder"
test_image_name: "citus/exttester"
citusupgrade_image_name: "citus/citusupgradetester"
fail_test_image_name: "citus/failtester"
pgupgrade_image_name: "citus/pgupgradetester"
style_checker_image_name: "citus/stylechecker"
style_checker_tools_version: "0.8.18"
image_suffix: "-vbab548a"
pg14_version: '{ "major": "14", "full": "14.8" }'
pg15_version: '{ "major": "15", "full": "15.3" }'
upgrade_pg_versions: "14.8-15.3"
steps:
# Since a GHA job needs at least one step, we use a noop step here.
- name: Set up parameters
run: echo 'noop'
check-sql-snapshots:
needs: params
runs-on: ubuntu-20.04
container:
image: ${{ needs.params.outputs.build_image_name }}:latest
options: --user root
steps:
- uses: actions/checkout@v3.5.0
- name: Check Snapshots
run: |
git config --global --add safe.directory ${GITHUB_WORKSPACE}
ci/check_sql_snapshots.sh
check-style:
needs: params
runs-on: ubuntu-20.04
container:
image: ${{ needs.params.outputs.style_checker_image_name }}:${{ needs.params.outputs.style_checker_tools_version }}${{ needs.params.outputs.image_suffix }}
steps:
- name: Mark the workspace as a safe git directory
run: |
git config --global --add safe.directory ${GITHUB_WORKSPACE}
- uses: actions/checkout@v3.5.0
with:
fetch-depth: 0
- name: Check C Style
run: citus_indent --check
- name: Check Python style
run: black --check .
- name: Check Python import order
run: isort --check .
- name: Check Python lints
run: flake8 .
- name: Fix whitespace
run: ci/editorconfig.sh && git diff --exit-code
- name: Remove useless declarations
run: ci/remove_useless_declarations.sh && git diff --cached --exit-code
- name: Normalize test output
run: ci/normalize_expected.sh && git diff --exit-code
- name: Check for C-style comments in migration files
run: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code
- name: 'Check for comments that start with # character in spec files'
run: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code
- name: Check for gitignore entries for source files
run: ci/fix_gitignore.sh && git diff --exit-code
- name: Check for lengths of changelog entries
run: ci/disallow_long_changelog_entries.sh
- name: Check for banned C API usage
run: ci/banned.h.sh
- name: Check for tests missing in schedules
run: ci/check_all_tests_are_run.sh
- name: Check if all CI scripts are actually run
run: ci/check_all_ci_scripts_are_run.sh
- name: Check if all GUCs are sorted alphabetically
run: ci/check_gucs_are_alphabetically_sorted.sh
- name: Check for missing downgrade scripts
run: ci/check_migration_files.sh
build:
needs: params
name: Build for PG${{ fromJson(matrix.pg_version).major }}
strategy:
fail-fast: false
matrix:
image_name:
- ${{ needs.params.outputs.build_image_name }}
image_suffix:
- ${{ needs.params.outputs.image_suffix}}
pg_version:
- ${{ needs.params.outputs.pg14_version }}
- ${{ needs.params.outputs.pg15_version }}
runs-on: ubuntu-20.04
container:
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
options: --user root
steps:
- uses: actions/checkout@v3.5.0
- name: Expose $PG_MAJOR to Github Env
run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
shell: bash
- name: Build
run: "./ci/build-citus.sh"
shell: bash
- uses: actions/upload-artifact@v3.1.1
with:
name: build-${{ env.PG_MAJOR }}
path: |-
./build-${{ env.PG_MAJOR }}/*
./install-${{ env.PG_MAJOR }}.tar
test-citus:
name: PG${{ fromJson(matrix.pg_version).major }} - ${{ matrix.make }}
strategy:
fail-fast: false
matrix:
suite:
- regress
image_name:
- ${{ needs.params.outputs.test_image_name }}
pg_version:
- ${{ needs.params.outputs.pg14_version }}
- ${{ needs.params.outputs.pg15_version }}
make:
- check-split
- check-multi
- check-multi-1
- check-multi-mx
- check-vanilla
- check-isolation
- check-operations
- check-follower-cluster
- check-columnar
- check-columnar-isolation
- check-enterprise
- check-enterprise-isolation
- check-enterprise-isolation-logicalrep-1
- check-enterprise-isolation-logicalrep-2
- check-enterprise-isolation-logicalrep-3
include:
- make: check-failure
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-failure
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: installcheck
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
pg_version: ${{ needs.params.outputs.pg15_version }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
runs-on: ubuntu-20.04
container:
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
options: --user root --dns=8.8.8.8
# Because GitHub creates a default network for each job, we need to use
# --dns= to get DNS settings similar to our other CI systems and local
# machines. Otherwise, we may see different results.
needs:
- params
- build
steps:
- uses: actions/checkout@v3.5.0
- uses: "./.github/actions/setup_extension"
- name: Run Test
run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }}
timeout-minutes: 20
- uses: "./.github/actions/save_logs_and_results"
if: always()
with:
folder: ${{ fromJson(matrix.pg_version).major }}_${{ matrix.make }}
- uses: "./.github/actions/upload_coverage"
if: always()
with:
flags: ${{ env.PG_MAJOR }}_${{ matrix.suite }}_${{ matrix.make }}
codecov_token: ${{ secrets.CODECOV_TOKEN }}
test-arbitrary-configs:
name: PG${{ fromJson(matrix.pg_version).major }} - check-arbitrary-configs-${{ matrix.parallel }}
runs-on: ["self-hosted", "1ES.Pool=1es-gha-citusdata-pool"]
container:
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
options: --user root
needs:
- params
- build
strategy:
fail-fast: false
matrix:
image_name:
- ${{ needs.params.outputs.fail_test_image_name }}
pg_version:
- ${{ needs.params.outputs.pg14_version }}
- ${{ needs.params.outputs.pg15_version }}
parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
steps:
- uses: actions/checkout@v3.5.0
- uses: "./.github/actions/setup_extension"
- name: Test arbitrary configs
run: |-
# we use parallel jobs to split the tests into 6 parts and run them in parallel
# the script below extracts the tests for the current job
N=6 # Total number of jobs (see matrix.parallel)
X=${{ matrix.parallel }} # Current job number
TESTS=$(src/test/regress/citus_tests/print_test_names.py |
tr '\n' ',' | awk -v N="$N" -v X="$X" -F, '{
split("", parts)
for (i = 1; i <= NF; i++) {
parts[i % N] = parts[i % N] $i ","
}
print substr(parts[X], 1, length(parts[X])-1)
}')
echo $TESTS
gosu circleci \
make -C src/test/regress \
check-arbitrary-configs parallel=4 CONFIGS=$TESTS
- uses: "./.github/actions/save_logs_and_results"
if: always()
- uses: "./.github/actions/upload_coverage"
if: always()
with:
flags: ${{ env.pg_major }}_upgrade
codecov_token: ${{ secrets.CODECOV_TOKEN }}
test-pg-upgrade:
name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
runs-on: ubuntu-20.04
container:
image: "${{ needs.params.outputs.pgupgrade_image_name }}:${{ needs.params.outputs.upgrade_pg_versions }}${{ needs.params.outputs.image_suffix }}"
options: --user root
needs:
- params
- build
strategy:
fail-fast: false
matrix:
include:
- old_pg_major: 14
new_pg_major: 15
env:
old_pg_major: ${{ matrix.old_pg_major }}
new_pg_major: ${{ matrix.new_pg_major }}
steps:
- uses: actions/checkout@v3.5.0
- uses: "./.github/actions/setup_extension"
with:
pg_major: "${{ env.old_pg_major }}"
- uses: "./.github/actions/setup_extension"
with:
pg_major: "${{ env.new_pg_major }}"
- name: Install and test postgres upgrade
run: |-
gosu circleci \
make -C src/test/regress \
check-pg-upgrade \
old-bindir=/usr/lib/postgresql/${{ env.old_pg_major }}/bin \
new-bindir=/usr/lib/postgresql/${{ env.new_pg_major }}/bin
- name: Copy pg_upgrade logs for newData dir
run: |-
mkdir -p /tmp/pg_upgrade_newData_logs
if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then
cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs
fi
if: failure()
- uses: "./.github/actions/save_logs_and_results"
if: always()
- uses: "./.github/actions/upload_coverage"
if: always()
with:
flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
codecov_token: ${{ secrets.CODECOV_TOKEN }}
test-citus-upgrade:
name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade
runs-on: ubuntu-20.04
container:
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}"
options: --user root
needs:
- params
- build
steps:
- uses: actions/checkout@v3.5.0
- uses: "./.github/actions/setup_extension"
with:
skip_installation: true
- name: Install and test citus upgrade
run: |-
# run make check-citus-upgrade for all citus versions
# the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
for citus_version in ${CITUS_VERSIONS}; do \
gosu circleci \
make -C src/test/regress \
check-citus-upgrade \
bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
citus-old-version=${citus_version} \
citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
citus-post-tar=${GITHUB_WORKSPACE}/install-$PG_MAJOR.tar; \
done;
# run make check-citus-upgrade-mixed for all citus versions
# the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of
for citus_version in ${CITUS_VERSIONS}; do \
gosu circleci \
make -C src/test/regress \
check-citus-upgrade-mixed \
citus-old-version=${citus_version} \
bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
citus-post-tar=${GITHUB_WORKSPACE}/install-$PG_MAJOR.tar; \
done;
- uses: "./.github/actions/save_logs_and_results"
if: always()
- uses: "./.github/actions/upload_coverage"
if: always()
with:
flags: ${{ env.pg_major }}_upgrade
codecov_token: ${{ secrets.CODECOV_TOKEN }}
upload-coverage:
if: always()
env:
CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
runs-on: ubuntu-20.04
container:
image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg15_version).full }}${{ needs.params.outputs.image_suffix }}
needs:
- params
- test-citus
- test-arbitrary-configs
- test-citus-upgrade
- test-pg-upgrade
steps:
- uses: actions/download-artifact@v3.0.1
with:
name: "codeclimate"
path: "codeclimate"
- name: Upload coverage results to Code Climate
run: |-
cc-test-reporter sum-coverage codeclimate/*.json -o total.json
cc-test-reporter upload-coverage -i total.json
ch_benchmark:
name: CH Benchmark
if: startsWith(github.ref, 'refs/heads/ch_benchmark/')
runs-on: ubuntu-20.04
needs:
- build
steps:
- uses: actions/checkout@v3.5.0
- uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: install dependencies and run ch_benchmark tests
uses: azure/CLI@v1
with:
inlineScript: |
cd ./src/test/hammerdb
chmod +x run_hammerdb.sh
run_hammerdb.sh citusbot_ch_benchmark_rg
tpcc_benchmark:
name: TPCC Benchmark
if: startsWith(github.ref, 'refs/heads/tpcc_benchmark/')
runs-on: ubuntu-20.04
needs:
- build
steps:
- uses: actions/checkout@v3.5.0
- uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: install dependencies and run tpcc_benchmark tests
uses: azure/CLI@v1
with:
inlineScript: |
cd ./src/test/hammerdb
chmod +x run_hammerdb.sh
run_hammerdb.sh citusbot_tpcc_benchmark_rg
prepare_parallelization_matrix_32:
name: Parallel 32
if: ${{ needs.test-flakyness-pre.outputs.tests != ''}}
needs: test-flakyness-pre
runs-on: ubuntu-20.04
outputs:
json: ${{ steps.parallelization.outputs.json }}
steps:
- uses: actions/checkout@v3.5.0
- uses: "./.github/actions/parallelization"
id: parallelization
with:
count: 32
test-flakyness-pre:
name: Detect regression tests that need to be run
if: ${{ !inputs.skip_test_flakyness }}
runs-on: ubuntu-20.04
needs: build
outputs:
tests: ${{ steps.detect-regression-tests.outputs.tests }}
steps:
- uses: actions/checkout@v3.5.0
with:
fetch-depth: 0
- name: Detect regression tests that need to be run
id: detect-regression-tests
run: |-
detected_changes=$(git diff origin/release-12.0... --name-only --diff-filter=AM | (grep 'src/test/regress/sql/.*\.sql\|src/test/regress/spec/.*\.spec\|src/test/regress/citus_tests/test/test_.*\.py' || true))
tests=${detected_changes}
if [ -z "$tests" ]; then
echo "No test found."
else
echo "Detected tests " $tests
fi
echo 'tests<<EOF' >> $GITHUB_OUTPUT
echo "$tests" >> "$GITHUB_OUTPUT"
echo 'EOF' >> $GITHUB_OUTPUT
test-flakyness:
if: false
name: Test flakyness
runs-on: ubuntu-20.04
container:
image: ${{ needs.params.outputs.fail_test_image_name }}:${{ needs.params.outputs.pg15_version }}${{ needs.params.outputs.image_suffix }}
options: --user root
env:
runs: 8
needs:
- params
- build
- test-flakyness-pre
- prepare_parallelization_matrix_32
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/download-artifact@v3.0.1
- uses: "./.github/actions/setup_extension"
- name: Run minimal tests
run: |-
tests="${{ needs.test-flakyness-pre.outputs.tests }}"
tests_array=($tests)
for test in "${tests_array[@]}"
do
test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/")
gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
done
shell: bash
- uses: "./.github/actions/save_logs_and_results"
if: always()

View File

@ -0,0 +1,79 @@
name: Flaky test debugging
run-name: Flaky test debugging - ${{ inputs.flaky_test }} (${{ inputs.flaky_test_runs_per_job }}x${{ inputs.flaky_test_parallel_jobs }})
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
inputs:
flaky_test:
required: true
type: string
description: Test to run
flaky_test_runs_per_job:
required: false
default: 8
type: number
description: Number of times to run the test
flaky_test_parallel_jobs:
required: false
default: 32
type: number
description: Number of parallel jobs to run
jobs:
build:
name: Build Citus
runs-on: ubuntu-latest
container:
image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
options: --user root
steps:
- uses: actions/checkout@v3.5.0
- name: Configure, Build, and Install
run: |
echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
./ci/build-citus.sh
shell: bash
- uses: actions/upload-artifact@v3.1.1
with:
name: build-${{ env.PG_MAJOR }}
path: |-
./build-${{ env.PG_MAJOR }}/*
./install-${{ env.PG_MAJOR }}.tar
prepare_parallelization_matrix:
name: Prepare parallelization matrix
runs-on: ubuntu-latest
outputs:
json: ${{ steps.parallelization.outputs.json }}
steps:
- uses: actions/checkout@v3.5.0
- uses: "./.github/actions/parallelization"
id: parallelization
with:
count: ${{ inputs.flaky_test_parallel_jobs }}
test_flakyness:
name: Test flakyness
runs-on: ubuntu-latest
container:
image: ${{ vars.fail_test_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
options: --user root
needs:
[build, prepare_parallelization_matrix]
env:
test: "${{ inputs.flaky_test }}"
runs: "${{ inputs.flaky_test_runs_per_job }}"
skip: false
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
steps:
- uses: actions/checkout@v3.5.0
- uses: "./.github/actions/setup_extension"
- name: Run minimal tests
run: |-
gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
shell: bash
- uses: "./.github/actions/save_logs_and_results"
if: always()
with:
folder: ${{ matrix.id }}

View File

@ -20,14 +20,16 @@ jobs:
 - name: Get Postgres Versions
   id: get-postgres-versions
   run: |
-    # Postgres versions are stored in .circleci/config.yml file in "build-[pg-version] format. Below command
-    # extracts the versions and get the unique values.
-    pg_versions=`grep -Eo 'build-[[:digit:]]{2}' .circleci/config.yml|sed -e "s/^build-//"|sort|uniq|tr '\n' ','| head -c -1`
+    set -euxo pipefail
+    # Postgres versions are stored in .github/workflows/build_and_test.yml
+    # file in json strings with major and full keys.
+    # Below command extracts the versions and get the unique values.
+    pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' | sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
     pg_versions_array="[ ${pg_versions} ]"
     echo "Supported PG Versions: ${pg_versions_array}"
     # Below line is needed to set the output variable to be used in the next job
     echo "pg_versions=${pg_versions_array}" >> $GITHUB_OUTPUT
+  shell: bash
 rpm_build_tests:
   name: rpm_build_tests
   needs: get_postgres_versions_from_file
@ -43,7 +45,7 @@ jobs:
   - oraclelinux-7
   - oraclelinux-8
   - centos-7
-  - centos-8
+  - almalinux-8
   - almalinux-9
   POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
@ -112,7 +114,6 @@ jobs:
   - ubuntu-bionic-all
   - ubuntu-focal-all
   - ubuntu-jammy-all
-  - ubuntu-kinetic-all
   POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
@ -155,7 +156,7 @@ jobs:
   run: |
     echo "Postgres version: ${POSTGRES_VERSION}"
-    apt-get update -y
+    apt-get update -y || true
     ## Install required packages to execute packaging tools for deb based distros
     apt-get install python3-dev python3-pip -y
     apt-get purge -y python3-yaml

View File

@ -1,3 +1,27 @@
+### citus v12.0.1 (July 11, 2023) ###
+
+* Fixes incorrect default value assumption for VACUUM(PROCESS_TOAST) (#7122)
+
+* Fixes a bug that causes an unexpected error when adding a column
+  with a NULL constraint (#7093)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes memory and memory contexts leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes shard size bug with too many shards (#7018)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves citus_tables view performance (#7050)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation and all references (#7135)
+
 ### citus v12.0.0 (July 11, 2023) ###

 * Adds support for schema-based sharding.

View File

@ -11,7 +11,7 @@ endif
 include Makefile.global
-all: extension pg_send_cancellation
+all: extension
 # build columnar only
@ -40,22 +40,14 @@ clean-full:
 install-downgrades:
   $(MAKE) -C src/backend/distributed/ install-downgrades
-install-all: install-headers install-pg_send_cancellation
+install-all: install-headers
   $(MAKE) -C src/backend/columnar/ install-all
   $(MAKE) -C src/backend/distributed/ install-all
-# build citus_send_cancellation binary
-pg_send_cancellation:
-  $(MAKE) -C src/bin/pg_send_cancellation/ all
-install-pg_send_cancellation: pg_send_cancellation
-  $(MAKE) -C src/bin/pg_send_cancellation/ install
-clean-pg_send_cancellation:
-  $(MAKE) -C src/bin/pg_send_cancellation/ clean
-.PHONY: pg_send_cancellation install-pg_send_cancellation clean-pg_send_cancellation
 # Add to generic targets
-install: install-extension install-headers install-pg_send_cancellation
+install: install-extension install-headers
-clean: clean-extension clean-pg_send_cancellation
+clean: clean-extension
 # apply or check style
 reindent:

View File

@ -15,9 +15,6 @@ PG_MAJOR=${PG_MAJOR:?please provide the postgres major version}
 codename=${VERSION#*(}
 codename=${codename%)*}
-# get project from argument
-project="${CIRCLE_PROJECT_REPONAME}"
 # we'll do everything with absolute paths
 basedir="$(pwd)"
@ -28,7 +25,7 @@ build_ext() {
 pg_major="$1"
 builddir="${basedir}/build-${pg_major}"
-echo "Beginning build of ${project} for PostgreSQL ${pg_major}..." >&2
+echo "Beginning build for PostgreSQL ${pg_major}..." >&2
 # do everything in a subdirectory to avoid clutter in current directory
 mkdir -p "${builddir}" && cd "${builddir}"

View File

@ -14,8 +14,8 @@ ci_scripts=$(
 grep -v -E '^(ci_helpers.sh|fix_style.sh)$'
 )
 for script in $ci_scripts; do
-if ! grep "\\bci/$script\\b" .circleci/config.yml > /dev/null; then
-echo "ERROR: CI script with name \"$script\" is not actually used in .circleci/config.yml"
+if ! grep "\\bci/$script\\b" -r .github > /dev/null; then
+echo "ERROR: CI script with name \"$script\" is not actually used in .github folder"
 exit 1
 fi
 if ! grep "^## \`$script\`\$" ci/README.md > /dev/null; then

View File

@ -1,96 +0,0 @@
#!/bin/bash
# Testing this script locally requires you to set the following environment
# variables:
# CIRCLE_BRANCH, GIT_USERNAME and GIT_TOKEN
# fail if trying to reference a variable that is not set.
set -u
# exit immediately if a command fails
set -e
# Fail on pipe failures
set -o pipefail
PR_BRANCH="${CIRCLE_BRANCH}"
ENTERPRISE_REMOTE="https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/citusdata/citus-enterprise"
# shellcheck disable=SC1091
source ci/ci_helpers.sh
# List executed commands. This is done so debugging this script is easier when
# it fails. It's explicitly done after git remote add so username and password
# are not shown in CI output (even though it's also filtered out by CircleCI)
set -x
check_compile () {
echo "INFO: checking if merged code can be compiled"
./configure --without-libcurl
make -j10
}
# Clone current git repo (which should be community) to a temporary working
# directory and go there
GIT_DIR_ROOT="$(git rev-parse --show-toplevel)"
TMP_GIT_DIR="$(mktemp --directory -t citus-merge-check.XXXXXXXXX)"
git clone "$GIT_DIR_ROOT" "$TMP_GIT_DIR"
cd "$TMP_GIT_DIR"
# Fails in CI without this
git config user.email "citus-bot@microsoft.com"
git config user.name "citus bot"
# Disable "set -x" temporarily, because $ENTERPRISE_REMOTE contains passwords
{ set +x ; } 2> /dev/null
git remote add enterprise "$ENTERPRISE_REMOTE"
set -x
git remote set-url --push enterprise no-pushing
# Fetch enterprise-master
git fetch enterprise enterprise-master
git checkout "enterprise/enterprise-master"
if git merge --no-commit "origin/$PR_BRANCH"; then
echo "INFO: community PR branch could be merged into enterprise-master"
# check that we can compile after the merge
if check_compile; then
exit 0
fi
echo "WARN: Failed to compile after community PR branch was merged into enterprise"
fi
# undo partial merge
git merge --abort
# If we have a conflict on enterprise merge on the master branch, we have a problem.
# Provide an error message to indicate that enterprise merge is needed to fix this check.
if [[ $PR_BRANCH = master ]]; then
echo "ERROR: Master branch has merge conflicts with enterprise-master."
echo "Try re-running this CI job after merging your changes into enterprise-master."
exit 1
fi
if ! git fetch enterprise "$PR_BRANCH" ; then
echo "ERROR: enterprise/$PR_BRANCH was not found and community PR branch could not be merged into enterprise-master"
exit 1
fi
# Show the top commit of the enterprise PR branch to make debugging easier
git log -n 1 "enterprise/$PR_BRANCH"
# Check that this branch contains the top commit of the current community PR
# branch. If it does not it means it's not up to date with the current PR, so
# the enterprise branch should be updated.
if ! git merge-base --is-ancestor "origin/$PR_BRANCH" "enterprise/$PR_BRANCH" ; then
echo "ERROR: enterprise/$PR_BRANCH is not up to date with community PR branch"
exit 1
fi
# Now check if we can merge the enterprise PR into enterprise-master without
# issues.
git merge --no-commit "enterprise/$PR_BRANCH"
# check that we can compile after the merge
check_compile

configure (vendored)

@ -1,6 +1,6 @@
#! /bin/sh #! /bin/sh
# Guess values for system-dependent variables and create Makefiles. # Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for Citus 12.0devel. # Generated by GNU Autoconf 2.69 for Citus 12.0.1.
# #
# #
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package. # Identity of this package.
PACKAGE_NAME='Citus' PACKAGE_NAME='Citus'
PACKAGE_TARNAME='citus' PACKAGE_TARNAME='citus'
PACKAGE_VERSION='12.0devel' PACKAGE_VERSION='12.0.1'
PACKAGE_STRING='Citus 12.0devel' PACKAGE_STRING='Citus 12.0.1'
PACKAGE_BUGREPORT='' PACKAGE_BUGREPORT=''
PACKAGE_URL='' PACKAGE_URL=''
@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing. # Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh. # This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF cat <<_ACEOF
\`configure' configures Citus 12.0devel to adapt to many kinds of systems. \`configure' configures Citus 12.0.1 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]... Usage: $0 [OPTION]... [VAR=VALUE]...
@ -1324,7 +1324,7 @@ fi
if test -n "$ac_init_help"; then if test -n "$ac_init_help"; then
case $ac_init_help in case $ac_init_help in
short | recursive ) echo "Configuration of Citus 12.0devel:";; short | recursive ) echo "Configuration of Citus 12.0.1:";;
esac esac
cat <<\_ACEOF cat <<\_ACEOF
@ -1429,7 +1429,7 @@ fi
test -n "$ac_init_help" && exit $ac_status test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then if $ac_init_version; then
cat <<\_ACEOF cat <<\_ACEOF
Citus configure 12.0devel Citus configure 12.0.1
generated by GNU Autoconf 2.69 generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc. Copyright (C) 2012 Free Software Foundation, Inc.
@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake. running configure, to aid debugging if configure makes a mistake.
It was created by Citus $as_me 12.0devel, which was It was created by Citus $as_me 12.0.1, which was
generated by GNU Autoconf 2.69. Invocation command line was generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@ $ $0 $@
@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their # report actual input values of CONFIG_FILES etc. instead of their
# values after options handling. # values after options handling.
ac_log=" ac_log="
This file was extended by Citus $as_me 12.0devel, which was This file was extended by Citus $as_me 12.0.1, which was
generated by GNU Autoconf 2.69. Invocation command line was generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES CONFIG_FILES = $CONFIG_FILES
@ -5455,7 +5455,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\ ac_cs_version="\\
Citus config.status 12.0devel Citus config.status 12.0.1
configured by $0, generated by GNU Autoconf 2.69, configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\" with options \\"\$ac_cs_config\\"


@ -5,7 +5,7 @@
# everyone needing autoconf installed, the resulting files are checked # everyone needing autoconf installed, the resulting files are checked
# into the SCM. # into the SCM.
AC_INIT([Citus], [12.0devel]) AC_INIT([Citus], [12.0.1])
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.]) AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
# we'll need sed and awk for some of the version commands # we'll need sed and awk for some of the version commands


@ -440,6 +440,19 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName,
if (!IsColocateWithDefault(colocateWithTableName) && !IsColocateWithNone( if (!IsColocateWithDefault(colocateWithTableName) && !IsColocateWithNone(
colocateWithTableName)) colocateWithTableName))
{ {
if (replicationModel != REPLICATION_MODEL_STREAMING)
{
ereport(ERROR, (errmsg("cannot create distributed table "
"concurrently because Citus allows "
"concurrent table distribution only when "
"citus.shard_replication_factor = 1"),
errhint("table %s is requested to be colocated "
"with %s which has "
"citus.shard_replication_factor > 1",
get_rel_name(relationId),
colocateWithTableName)));
}
EnsureColocateWithTableIsValid(relationId, distributionMethod, EnsureColocateWithTableIsValid(relationId, distributionMethod,
distributionColumnName, distributionColumnName,
colocateWithTableName); colocateWithTableName);


@ -318,7 +318,10 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
} }
/* if no flags remain, exit early */ /* if no flags remain, exit early */
if (vacuumFlags == 0 && if (vacuumFlags & VACOPT_PROCESS_TOAST)
{
/* process toast is true by default */
if ((vacuumFlags & ~VACOPT_PROCESS_TOAST) == 0 &&
vacuumParams.truncate == VACOPTVALUE_UNSPECIFIED && vacuumParams.truncate == VACOPTVALUE_UNSPECIFIED &&
vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED && vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED &&
vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET
@ -326,6 +329,7 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
{ {
return vacuumPrefix->data; return vacuumPrefix->data;
} }
}
/* otherwise, handle options */ /* otherwise, handle options */
appendStringInfoChar(vacuumPrefix, '('); appendStringInfoChar(vacuumPrefix, '(');
@ -360,9 +364,9 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
appendStringInfoString(vacuumPrefix, "SKIP_LOCKED,"); appendStringInfoString(vacuumPrefix, "SKIP_LOCKED,");
} }
if (vacuumFlags & VACOPT_PROCESS_TOAST) if (!(vacuumFlags & VACOPT_PROCESS_TOAST))
{ {
appendStringInfoString(vacuumPrefix, "PROCESS_TOAST,"); appendStringInfoString(vacuumPrefix, "PROCESS_TOAST FALSE,");
} }
if (vacuumParams.truncate != VACOPTVALUE_UNSPECIFIED) if (vacuumParams.truncate != VACOPTVALUE_UNSPECIFIED)
@ -499,7 +503,7 @@ VacuumStmtParams(VacuumStmt *vacstmt)
bool freeze = false; bool freeze = false;
bool full = false; bool full = false;
bool disable_page_skipping = false; bool disable_page_skipping = false;
bool process_toast = false; bool process_toast = true;
/* Set default value */ /* Set default value */
params.index_cleanup = VACOPTVALUE_UNSPECIFIED; params.index_cleanup = VACOPTVALUE_UNSPECIFIED;


@ -716,14 +716,14 @@ PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes)
Assert(PQisnonblocking(pgConn)); Assert(PQisnonblocking(pgConn));
int copyState = PQputCopyData(pgConn, buffer, nbytes); int copyState = PQputCopyData(pgConn, buffer, nbytes);
if (copyState == -1) if (copyState <= 0)
{ {
return false; return false;
} }
/* /*
* PQputCopyData may have queued up part of the data even if it managed * PQputCopyData may have queued up part of the data even if it managed
* to send some of it succesfully. We provide back pressure by waiting * to send some of it successfully. We provide back pressure by waiting
* until the socket is writable to prevent the internal libpq buffers * until the socket is writable to prevent the internal libpq buffers
* from growing excessively. * from growing excessively.
* *


@ -627,6 +627,10 @@ AppendAlterTableCmdAddColumn(StringInfo buf, AlterTableCmd *alterTableCmd,
{ {
appendStringInfoString(buf, " NOT NULL"); appendStringInfoString(buf, " NOT NULL");
} }
else if (constraint->contype == CONSTR_NULL)
{
appendStringInfoString(buf, " NULL");
}
else if (constraint->contype == CONSTR_DEFAULT) else if (constraint->contype == CONSTR_DEFAULT)
{ {
char attgenerated = '\0'; char attgenerated = '\0';


@ -1529,8 +1529,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/* Assert we processed the right number of columns */ /* Assert we processed the right number of columns */
#ifdef USE_ASSERT_CHECKING #ifdef USE_ASSERT_CHECKING
while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) for (int col_index = 0; col_index < colinfo->num_cols; col_index++)
{
/*
* In the above processing-loops, "i" advances only if
* the column is not new; here we check whether this is a new column.
*/
if (colinfo->is_new_col[col_index])
i++; i++;
}
Assert(i == colinfo->num_cols); Assert(i == colinfo->num_cols);
Assert(j == nnewcolumns); Assert(j == nnewcolumns);
#endif #endif


@ -1566,8 +1566,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/* Assert we processed the right number of columns */ /* Assert we processed the right number of columns */
#ifdef USE_ASSERT_CHECKING #ifdef USE_ASSERT_CHECKING
while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) for (int col_index = 0; col_index < colinfo->num_cols; col_index++)
{
/*
* In the above processing-loops, "i" advances only if
* the column is not new; here we check whether this is a new column.
*/
if (colinfo->is_new_col[col_index])
i++; i++;
}
Assert(i == colinfo->num_cols); Assert(i == colinfo->num_cols);
Assert(j == nnewcolumns); Assert(j == nnewcolumns);
#endif #endif


@ -1449,7 +1449,7 @@ RegisterCitusConfigVariables(void)
"and operating system name. This configuration value controls " "and operating system name. This configuration value controls "
"whether these reports are sent."), "whether these reports are sent."),
&EnableStatisticsCollection, &EnableStatisticsCollection,
#if defined(HAVE_LIBCURL) #if defined(HAVE_LIBCURL) && defined(ENABLE_CITUS_STATISTICS_COLLECTION)
true, true,
#else #else
false, false,


@ -0,0 +1,9 @@
DROP VIEW citus_shards;
DROP VIEW IF EXISTS pg_catalog.citus_tables;
DROP VIEW IF EXISTS public.citus_tables;
DROP FUNCTION citus_shard_sizes;
#include "udfs/citus_shard_sizes/11.3-2.sql"
#include "udfs/citus_shards/11.3-2.sql"
#include "udfs/citus_tables/11.3-2.sql"


@ -24,13 +24,8 @@ GRANT SELECT ON pg_catalog.pg_dist_schema TO public;
#include "udfs/citus_internal_unregister_tenant_schema_globally/12.0-1.sql" #include "udfs/citus_internal_unregister_tenant_schema_globally/12.0-1.sql"
#include "udfs/citus_drop_trigger/12.0-1.sql" #include "udfs/citus_drop_trigger/12.0-1.sql"
DROP VIEW citus_shards;
DROP VIEW IF EXISTS pg_catalog.citus_tables;
DROP VIEW IF EXISTS public.citus_tables;
DROP FUNCTION citus_shard_sizes;
#include "udfs/citus_shard_sizes/12.0-1.sql"
#include "udfs/citus_tables/12.0-1.sql" #include "udfs/citus_tables/12.0-1.sql"
DROP VIEW citus_shards;
#include "udfs/citus_shards/12.0-1.sql" #include "udfs/citus_shards/12.0-1.sql"
#include "udfs/citus_schemas/12.0-1.sql" #include "udfs/citus_schemas/12.0-1.sql"


@ -0,0 +1,13 @@
DROP VIEW IF EXISTS public.citus_tables;
DROP VIEW IF EXISTS pg_catalog.citus_tables;
DROP VIEW pg_catalog.citus_shards;
DROP FUNCTION pg_catalog.citus_shard_sizes;
#include "../udfs/citus_shard_sizes/10.0-1.sql"
-- citus_shards/11.1-1.sql tries to create citus_shards in pg_catalog but it is not allowed.
-- Here we use citus_shards/10.0-1.sql to properly create the view in citus schema and
-- then alter it to pg_catalog, so citus_shards/11.1-1.sql can REPLACE it without any errors.
#include "../udfs/citus_shards/10.0-1.sql"
#include "../udfs/citus_tables/11.1-1.sql"
#include "../udfs/citus_shards/11.1-1.sql"


@ -51,15 +51,9 @@ DROP VIEW IF EXISTS public.citus_tables;
DROP VIEW IF EXISTS pg_catalog.citus_tables; DROP VIEW IF EXISTS pg_catalog.citus_tables;
DROP VIEW pg_catalog.citus_shards; DROP VIEW pg_catalog.citus_shards;
DROP FUNCTION pg_catalog.citus_shard_sizes;
#include "../udfs/citus_shard_sizes/10.0-1.sql"
-- citus_shards/11.1-1.sql tries to create citus_shards in pg_catalog but it is not allowed.
-- Here we use citus_shards/10.0-1.sql to properly create the view in citus schema and
-- then alter it to pg_catalog, so citus_shards/11.1-1.sql can REPLACE it without any errors.
#include "../udfs/citus_shards/10.0-1.sql"
#include "../udfs/citus_tables/11.1-1.sql" #include "../udfs/citus_tables/11.3-2.sql"
#include "../udfs/citus_shards/11.1-1.sql" #include "../udfs/citus_shards/11.3-2.sql"
DROP TABLE pg_catalog.pg_dist_schema; DROP TABLE pg_catalog.pg_dist_schema;


@ -0,0 +1,46 @@
CREATE OR REPLACE VIEW citus.citus_shards AS
SELECT
pg_dist_shard.logicalrelid AS table_name,
pg_dist_shard.shardid,
shard_name(pg_dist_shard.logicalrelid, pg_dist_shard.shardid) as shard_name,
CASE WHEN partkey IS NOT NULL THEN 'distributed' WHEN repmodel = 't' THEN 'reference' ELSE 'local' END AS citus_table_type,
colocationid AS colocation_id,
pg_dist_node.nodename,
pg_dist_node.nodeport,
size as shard_size
FROM
pg_dist_shard
JOIN
pg_dist_placement
ON
pg_dist_shard.shardid = pg_dist_placement.shardid
JOIN
pg_dist_node
ON
pg_dist_placement.groupid = pg_dist_node.groupid
JOIN
pg_dist_partition
ON
pg_dist_partition.logicalrelid = pg_dist_shard.logicalrelid
LEFT JOIN
(SELECT shard_id, max(size) as size from citus_shard_sizes() GROUP BY shard_id) as shard_sizes
ON
pg_dist_shard.shardid = shard_sizes.shard_id
WHERE
pg_dist_placement.shardstate = 1
AND
-- filter out tables owned by extensions
pg_dist_partition.logicalrelid NOT IN (
SELECT
objid
FROM
pg_depend
WHERE
classid = 'pg_class'::regclass AND refclassid = 'pg_extension'::regclass AND deptype = 'e'
)
ORDER BY
pg_dist_shard.logicalrelid::text, shardid
;
ALTER VIEW citus.citus_shards SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_shards TO public;


@ -0,0 +1,55 @@
DO $$
declare
citus_tables_create_query text;
BEGIN
citus_tables_create_query=$CTCQ$
CREATE OR REPLACE VIEW %I.citus_tables AS
SELECT
logicalrelid AS table_name,
CASE WHEN partkey IS NOT NULL THEN 'distributed' ELSE
CASE when repmodel = 't' THEN 'reference' ELSE 'local' END
END AS citus_table_type,
coalesce(column_to_column_name(logicalrelid, partkey), '<none>') AS distribution_column,
colocationid AS colocation_id,
pg_size_pretty(table_sizes.table_size) AS table_size,
(select count(*) from pg_dist_shard where logicalrelid = p.logicalrelid) AS shard_count,
pg_get_userbyid(relowner) AS table_owner,
amname AS access_method
FROM
pg_dist_partition p
JOIN
pg_class c ON (p.logicalrelid = c.oid)
LEFT JOIN
pg_am a ON (a.oid = c.relam)
JOIN
(
SELECT ds.logicalrelid AS table_id, SUM(css.size) AS table_size
FROM citus_shard_sizes() css, pg_dist_shard ds
WHERE css.shard_id = ds.shardid
GROUP BY ds.logicalrelid
) table_sizes ON (table_sizes.table_id = p.logicalrelid)
WHERE
-- filter out tables owned by extensions
logicalrelid NOT IN (
SELECT
objid
FROM
pg_depend
WHERE
classid = 'pg_class'::regclass AND refclassid = 'pg_extension'::regclass AND deptype = 'e'
)
ORDER BY
logicalrelid::text;
$CTCQ$;
IF EXISTS (SELECT 1 FROM pg_namespace WHERE nspname = 'public') THEN
EXECUTE format(citus_tables_create_query, 'public');
GRANT SELECT ON public.citus_tables TO public;
ELSE
EXECUTE format(citus_tables_create_query, 'citus');
ALTER VIEW citus.citus_tables SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_tables TO public;
END IF;
END;
$$;


@ -1,70 +0,0 @@
/*-------------------------------------------------------------------------
*
* pg_send_cancellation.c
*
* This file contains functions to test setting pg_send_cancellation.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "miscadmin.h"
#include "fmgr.h"
#include "port.h"
#include "postmaster/postmaster.h"
#define PG_SEND_CANCELLATION_VERSION \
"pg_send_cancellation (PostgreSQL) " PG_VERSION "\n"
/* exports for SQL callable functions */
PG_FUNCTION_INFO_V1(get_cancellation_key);
PG_FUNCTION_INFO_V1(run_pg_send_cancellation);
/*
* get_cancellation_key returns the cancellation key of the current process
* as an integer.
*/
Datum
get_cancellation_key(PG_FUNCTION_ARGS)
{
PG_RETURN_INT32(MyCancelKey);
}
/*
* run_pg_send_cancellation runs the pg_send_cancellation program with
* the specified arguments
*/
Datum
run_pg_send_cancellation(PG_FUNCTION_ARGS)
{
int pid = PG_GETARG_INT32(0);
int cancelKey = PG_GETARG_INT32(1);
char sendCancellationPath[MAXPGPATH];
char command[1024];
/* Locate executable backend before we change working directory */
if (find_other_exec(my_exec_path, "pg_send_cancellation",
PG_SEND_CANCELLATION_VERSION,
sendCancellationPath) < 0)
{
ereport(ERROR, (errmsg("could not locate pg_send_cancellation")));
}
pg_snprintf(command, sizeof(command), "%s %d %d %s %d",
sendCancellationPath, pid, cancelKey, "localhost", PostPortNumber);
if (system(command) != 0)
{
ereport(ERROR, (errmsg("failed to run command: %s", command)));
}
PG_RETURN_VOID();
}


@ -141,7 +141,17 @@ SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSch
fauxFunction->funcexpr = (Node *) fauxFuncExpr; fauxFunction->funcexpr = (Node *) fauxFuncExpr;
/* set the column count to pass ruleutils checks, not used elsewhere */ /* set the column count to pass ruleutils checks, not used elsewhere */
if (rte->relid != 0)
{
Relation rel = RelationIdGetRelation(rte->relid);
fauxFunction->funccolcount = RelationGetNumberOfAttributes(rel);
RelationClose(rel);
}
else
{
fauxFunction->funccolcount = list_length(rte->eref->colnames); fauxFunction->funccolcount = list_length(rte->eref->colnames);
}
fauxFunction->funccolnames = funcColumnNames; fauxFunction->funccolnames = funcColumnNames;
fauxFunction->funccoltypes = funcColumnTypes; fauxFunction->funccoltypes = funcColumnTypes;
fauxFunction->funccoltypmods = funcColumnTypeMods; fauxFunction->funccoltypmods = funcColumnTypeMods;


@ -174,12 +174,11 @@ BreakColocation(Oid sourceRelationId)
*/ */
Relation pgDistColocation = table_open(DistColocationRelationId(), ExclusiveLock); Relation pgDistColocation = table_open(DistColocationRelationId(), ExclusiveLock);
uint32 newColocationId = GetNextColocationId(); uint32 oldColocationId = TableColocationId(sourceRelationId);
bool localOnly = false; CreateColocationGroupForRelation(sourceRelationId);
UpdateRelationColocationGroup(sourceRelationId, newColocationId, localOnly);
/* if there is not any remaining table in the colocation group, delete it */ /* if there is not any remaining table in the old colocation group, delete it */
DeleteColocationGroupIfNoTablesBelong(sourceRelationId); DeleteColocationGroupIfNoTablesBelong(oldColocationId);
table_close(pgDistColocation, NoLock); table_close(pgDistColocation, NoLock);
} }


@ -28,6 +28,7 @@
#include "distributed/version_compat.h" #include "distributed/version_compat.h"
#include "nodes/pg_list.h" #include "nodes/pg_list.h"
#include "storage/lockdefs.h" #include "storage/lockdefs.h"
#include "utils/catcache.h"
#include "utils/fmgroids.h" #include "utils/fmgroids.h"
#include "utils/hsearch.h" #include "utils/hsearch.h"
#include "common/hashfn.h" #include "common/hashfn.h"
@ -96,6 +97,8 @@ static List * GetConnectedListHelper(ForeignConstraintRelationshipNode *node,
bool isReferencing); bool isReferencing);
static List * GetForeignConstraintRelationshipHelper(Oid relationId, bool isReferencing); static List * GetForeignConstraintRelationshipHelper(Oid relationId, bool isReferencing);
MemoryContext ForeignConstraintRelationshipMemoryContext = NULL;
/* /*
* GetForeignKeyConnectedRelationIdList returns a list of relation id's for * GetForeignKeyConnectedRelationIdList returns a list of relation id's for
@ -321,17 +324,36 @@ CreateForeignConstraintRelationshipGraph()
return; return;
} }
ClearForeignConstraintRelationshipGraphContext(); /*
* Lazily create our memory context once and reset on every reuse.
* Since we have already cleared and invalidated fConstraintRelationshipGraph right
* before this point, we can simply reset the context if it already exists.
*/
if (ForeignConstraintRelationshipMemoryContext == NULL)
{
/* make sure we've initialized CacheMemoryContext */
if (CacheMemoryContext == NULL)
{
CreateCacheMemoryContext();
}
MemoryContext fConstraintRelationshipMemoryContext = AllocSetContextCreateInternal( ForeignConstraintRelationshipMemoryContext = AllocSetContextCreate(
CacheMemoryContext, CacheMemoryContext,
"Forign Constraint Relationship Graph Context", "Foreign Constraint Relationship Graph Context",
ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE); ALLOCSET_DEFAULT_MAXSIZE);
}
else
{
fConstraintRelationshipGraph = NULL;
MemoryContextReset(ForeignConstraintRelationshipMemoryContext);
}
Assert(fConstraintRelationshipGraph == NULL);
MemoryContext oldContext = MemoryContextSwitchTo( MemoryContext oldContext = MemoryContextSwitchTo(
fConstraintRelationshipMemoryContext); ForeignConstraintRelationshipMemoryContext);
fConstraintRelationshipGraph = (ForeignConstraintRelationshipGraph *) palloc( fConstraintRelationshipGraph = (ForeignConstraintRelationshipGraph *) palloc(
sizeof(ForeignConstraintRelationshipGraph)); sizeof(ForeignConstraintRelationshipGraph));
@ -631,22 +653,3 @@ CreateOrFindNode(HTAB *adjacencyLists, Oid relid)
return node; return node;
} }
/*
* ClearForeignConstraintRelationshipGraphContext clear all the allocated memory obtained
* for foreign constraint relationship graph. Since all the variables of relationship
* graph was obtained within the same context, destroying hash map is enough as
* it deletes the context.
*/
void
ClearForeignConstraintRelationshipGraphContext()
{
if (fConstraintRelationshipGraph == NULL)
{
return;
}
hash_destroy(fConstraintRelationshipGraph->nodeMap);
fConstraintRelationshipGraph = NULL;
}


@ -14,7 +14,11 @@
#include "fmgr.h" #include "fmgr.h"
#include "utils/uuid.h" #include "utils/uuid.h"
#if defined(HAVE_LIBCURL) && defined(ENABLE_CITUS_STATISTICS_COLLECTION)
bool EnableStatisticsCollection = true; /* send basic usage statistics to Citus */ bool EnableStatisticsCollection = true; /* send basic usage statistics to Citus */
#else
bool EnableStatisticsCollection = false;
#endif
PG_FUNCTION_INFO_V1(citus_server_id); PG_FUNCTION_INFO_V1(citus_server_id);


@ -1 +0,0 @@
pg_send_cancellation


@ -1,24 +0,0 @@
citus_top_builddir = ../../..
PROGRAM = pg_send_cancellation
PGFILEDESC = "pg_send_cancellation sends a custom cancellation message"
OBJS = $(citus_abs_srcdir)/src/bin/pg_send_cancellation/pg_send_cancellation.o
PG_CPPFLAGS = -I$(libpq_srcdir)
PG_LIBS_INTERNAL = $(libpq_pgport)
PG_LDFLAGS += $(LDFLAGS)
include $(citus_top_builddir)/Makefile.global
# We reuse all the Citus flags (incl. security flags), but we are building a program not a shared library
# We sometimes build Citus with a newer version of gcc than Postgres was built
# with and this breaks LTO (link-time optimization). Even if disabling it can
# have some perf impact this is ok because pg_send_cancellation is only used
# for tests anyway.
override CFLAGS := $(filter-out -shared, $(CFLAGS)) -fno-lto
# Filter out unneeded dependencies
override LIBS := $(filter-out -lz -lreadline -ledit -ltermcap -lncurses -lcurses -lpam, $(LIBS))
clean: clean-pg_send_cancellation
clean-pg_send_cancellation:
rm -f $(PROGRAM) $(OBJS)


@ -1,47 +0,0 @@
# pg_send_cancellation
pg_send_cancellation is a program for manually sending a cancellation
to a Postgres endpoint. It is effectively a command-line version of
PQcancel in libpq, but it can use any PID or cancellation key.
We use pg_send_cancellation primarily to propagate cancellations between pgbouncers
behind a load balancer. Since the cancellation protocol involves
opening a new connection, the new connection may go to a different
node that does not recognize the cancellation key. To handle that
scenario, we modified pgbouncer to pass unrecognized cancellation
keys to a shell command.
Users can configure the cancellation_command, which will be run with:
```
<cancellation_command> <client ip> <client port> <pid> <cancel key>
```
Note that pgbouncer does not use actual PIDs. Instead, it generates the PID and cancellation key together as a random 8-byte number. This makes the chance of collisions exceedingly small.
By providing pg_send_cancellation as part of Citus, we can use a shell script that pgbouncer invokes to propagate the cancellation to all *other* worker nodes in the same cluster, for example:
```bash
#!/bin/sh
remote_ip=$1
remote_port=$2
pid=$3
cancel_key=$4
postgres_path=/usr/pgsql-14/bin
pgbouncer_port=6432
nodes_query="select nodename from pg_dist_node where groupid > 0 and groupid not in (select groupid from pg_dist_local_group) and nodecluster = current_setting('citus.cluster_name')"
# Get hostnames of other worker nodes in the cluster, and send cancellation to their pgbouncers
$postgres_path/psql -c "$nodes_query" -tAX | xargs -n 1 sh -c "$postgres_path/pg_send_cancellation $pid $cancel_key \$0 $pgbouncer_port"
```
One thing we need to be careful about is that the cancellations do not get forwarded
back-and-forth. This is handled in pgbouncer by setting the last bit of all generated
cancellation keys (sent to clients) to 1, and setting the last bit of all forwarded keys to 0.
That way, when a pgbouncer receives a cancellation key with the last bit set to 0,
it knows it is from another pgbouncer and should not forward further, and should set
the last bit to 1 when comparing to stored cancellation keys.
Another thing we need to be careful about is that the integers should be encoded
as big endian on the wire.
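A minimal sketch of the two conventions described above (purely illustrative; the helper names below are made up and are not part of Citus or pgbouncer): the last bit of a cancellation key marks whether it may still be forwarded, and integers are encoded big endian on the wire.
```c
#include <stdint.h>
#include <stdio.h>

/* keys handed out to clients carry 1 in the last bit */
static uint32_t KeyForClient(uint32_t key) { return key | 1u; }

/* keys forwarded to other pgbouncers carry 0, so they are not forwarded again */
static uint32_t KeyForForwarding(uint32_t key) { return key & ~1u; }

/* encode a 32-bit integer in big-endian (network) byte order */
static void EncodeBigEndian(uint32_t value, uint8_t out[4])
{
	out[0] = (uint8_t) (value >> 24);
	out[1] = (uint8_t) (value >> 16);
	out[2] = (uint8_t) (value >> 8);
	out[3] = (uint8_t) value;
}

int main(void)
{
	uint32_t key = 0x12345678;
	uint8_t wire[4];

	EncodeBigEndian(KeyForForwarding(key), wire);
	printf("client key: %08x, forwarded key bytes: %02x %02x %02x %02x\n",
		   (unsigned) KeyForClient(key), wire[0], wire[1], wire[2], wire[3]);
	return 0;
}
```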


@ -1,261 +0,0 @@
/*
* pg_send_cancellation is a program for manually sending a cancellation
* to a Postgres endpoint. It is effectively a command-line version of
* PQcancel in libpq, but it can use any PID or cancellation key.
*
* Portions Copyright (c) Citus Data, Inc.
*
* For the internal_cancel function:
*
* Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose, without fee, and without a written agreement
* is hereby granted, provided that the above copyright notice and this
* paragraph and the following two paragraphs appear in all copies.
*
* IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
* LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
* DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
*/
#include "postgres_fe.h"
#include <sys/stat.h>
#include <fcntl.h>
#include <ctype.h>
#include <time.h>
#include <unistd.h>
#include "common/ip.h"
#include "common/link-canary.h"
#include "common/scram-common.h"
#include "common/string.h"
#include "libpq-fe.h"
#include "libpq-int.h"
#include "mb/pg_wchar.h"
#include "port/pg_bswap.h"
#define ERROR_BUFFER_SIZE 256
static int internal_cancel(SockAddr *raddr, int be_pid, int be_key,
char *errbuf, int errbufsize);
/*
* main entry point into the pg_send_cancellation program.
*/
int
main(int argc, char *argv[])
{
if (argc == 2 && strcmp(argv[1], "-V") == 0)
{
pg_fprintf(stdout, "pg_send_cancellation (PostgreSQL) " PG_VERSION "\n");
return 0;
}
if (argc < 4 || argc > 5)
{
char *program = argv[0];
pg_fprintf(stderr, "%s requires 4 arguments\n\n", program);
pg_fprintf(stderr, "Usage: %s <pid> <cancel key> <hostname> [port]\n", program);
return 1;
}
char *pidString = argv[1];
char *cancelKeyString = argv[2];
char *host = argv[3];
char *portString = "5432";
if (argc >= 5)
{
portString = argv[4];
}
/* parse the PID and cancellation key */
int pid = strtol(pidString, NULL, 10);
int cancelAuthCode = strtol(cancelKeyString, NULL, 10);
char errorBuffer[ERROR_BUFFER_SIZE] = { 0 };
struct addrinfo *ipAddressList;
struct addrinfo hint;
int ipAddressListFamily = AF_UNSPEC;
SockAddr socketAddress;
memset(&hint, 0, sizeof(hint));
hint.ai_socktype = SOCK_STREAM;
hint.ai_family = ipAddressListFamily;
/* resolve the hostname to an IP */
int ret = pg_getaddrinfo_all(host, portString, &hint, &ipAddressList);
if (ret || !ipAddressList)
{
pg_fprintf(stderr, "could not translate host name \"%s\" to address: %s\n",
host, gai_strerror(ret));
return 1;
}
if (ipAddressList->ai_addrlen > sizeof(socketAddress.addr))
{
pg_fprintf(stderr, "invalid address length");
return 1;
}
/*
* Explanation of IGNORE-BANNED:
* This is a common pattern when using getaddrinfo. The system guarantees
* that ai_addrlen < sizeof(socketAddress.addr). Out of an abundance of
* caution, we also check it above.
*/
memcpy(&socketAddress.addr, ipAddressList->ai_addr, ipAddressList->ai_addrlen); /* IGNORE-BANNED */
socketAddress.salen = ipAddressList->ai_addrlen;
/* send the cancellation */
bool cancelSucceeded = internal_cancel(&socketAddress, pid, cancelAuthCode,
errorBuffer, sizeof(errorBuffer));
if (!cancelSucceeded)
{
pg_fprintf(stderr, "sending cancellation to %s:%s failed: %s",
host, portString, errorBuffer);
return 1;
}
pg_freeaddrinfo_all(ipAddressListFamily, ipAddressList);
return 0;
}
/* *INDENT-OFF* */
/*
* internal_cancel is copied from fe-connect.c
*
* The return value is true if the cancel request was successfully
* dispatched, false if not (in which case an error message is available).
* Note: successful dispatch is no guarantee that there will be any effect at
* the backend. The application must read the operation result as usual.
*
* CAUTION: we want this routine to be safely callable from a signal handler
* (for example, an application might want to call it in a SIGINT handler).
* This means we cannot use any C library routine that might be non-reentrant.
* malloc/free are often non-reentrant, and anything that might call them is
* just as dangerous. We avoid sprintf here for that reason. Building up
* error messages with strcpy/strcat is tedious but should be quite safe.
* We also save/restore errno in case the signal handler support doesn't.
*
* internal_cancel() is an internal helper function to make code-sharing
* between the two versions of the cancel function possible.
*/
static int
internal_cancel(SockAddr *raddr, int be_pid, int be_key,
char *errbuf, int errbufsize)
{
int save_errno = SOCK_ERRNO;
pgsocket tmpsock = PGINVALID_SOCKET;
char sebuf[PG_STRERROR_R_BUFLEN];
int maxlen;
struct
{
uint32 packetlen;
CancelRequestPacket cp;
} crp;
/*
* We need to open a temporary connection to the postmaster. Do this with
* only kernel calls.
*/
if ((tmpsock = socket(raddr->addr.ss_family, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
{
strlcpy(errbuf, "PQcancel() -- socket() failed: ", errbufsize);
goto cancel_errReturn;
}
retry3:
if (connect(tmpsock, (struct sockaddr *) &raddr->addr, raddr->salen) < 0)
{
if (SOCK_ERRNO == EINTR)
/* Interrupted system call - we'll just try again */
goto retry3;
strlcpy(errbuf, "PQcancel() -- connect() failed: ", errbufsize);
goto cancel_errReturn;
}
/*
* We needn't set nonblocking I/O or NODELAY options here.
*/
/* Create and send the cancel request packet. */
crp.packetlen = pg_hton32((uint32) sizeof(crp));
crp.cp.cancelRequestCode = (MsgType) pg_hton32(CANCEL_REQUEST_CODE);
crp.cp.backendPID = pg_hton32(be_pid);
crp.cp.cancelAuthCode = pg_hton32(be_key);
retry4:
if (send(tmpsock, (char *) &crp, sizeof(crp), 0) != (int) sizeof(crp))
{
if (SOCK_ERRNO == EINTR)
/* Interrupted system call - we'll just try again */
goto retry4;
strlcpy(errbuf, "PQcancel() -- send() failed: ", errbufsize);
goto cancel_errReturn;
}
/*
* Wait for the postmaster to close the connection, which indicates that
* it's processed the request. Without this delay, we might issue another
* command only to find that our cancel zaps that command instead of the
* one we thought we were canceling. Note we don't actually expect this
* read to obtain any data, we are just waiting for EOF to be signaled.
*/
retry5:
if (recv(tmpsock, (char *) &crp, 1, 0) < 0)
{
if (SOCK_ERRNO == EINTR)
/* Interrupted system call - we'll just try again */
goto retry5;
/* we ignore other error conditions */
}
/* All done */
closesocket(tmpsock);
SOCK_ERRNO_SET(save_errno);
return true;
cancel_errReturn:
/*
* Make sure we don't overflow the error buffer. Leave space for the \n at
* the end, and for the terminating zero.
*/
maxlen = errbufsize - strlen(errbuf) - 2;
if (maxlen >= 0)
{
/*
* Explanation of IGNORE-BANNED:
* This is well-tested libpq code that we would like to preserve in its
* original form. The appropriate length calculation is done above.
*/
strncat(errbuf, SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)), /* IGNORE-BANNED */
maxlen);
strcat(errbuf, "\n"); /* IGNORE-BANNED */
}
if (tmpsock != PGINVALID_SOCKET)
closesocket(tmpsock);
SOCK_ERRNO_SET(save_errno);
return false;
}
/* *INDENT-ON* */


@ -20,7 +20,6 @@ extern bool ShouldUndistributeCitusLocalTable(Oid relationId);
extern List * ReferencedRelationIdList(Oid relationId); extern List * ReferencedRelationIdList(Oid relationId);
extern List * ReferencingRelationIdList(Oid relationId); extern List * ReferencingRelationIdList(Oid relationId);
extern void SetForeignConstraintRelationshipGraphInvalid(void); extern void SetForeignConstraintRelationshipGraphInvalid(void);
extern void ClearForeignConstraintRelationshipGraphContext(void);
extern bool OidVisited(HTAB *oidVisitedMap, Oid oid); extern bool OidVisited(HTAB *oidVisitedMap, Oid oid);
extern void VisitOid(HTAB *oidVisitedMap, Oid oid); extern void VisitOid(HTAB *oidVisitedMap, Oid oid);


@ -10,7 +10,6 @@ test: isolation_move_placement_vs_modification
test: isolation_move_placement_vs_modification_fk test: isolation_move_placement_vs_modification_fk
test: isolation_tenant_isolation_with_fkey_to_reference test: isolation_tenant_isolation_with_fkey_to_reference
test: isolation_ref2ref_foreign_keys_enterprise test: isolation_ref2ref_foreign_keys_enterprise
test: isolation_pg_send_cancellation
test: isolation_shard_move_vs_start_metadata_sync test: isolation_shard_move_vs_start_metadata_sync
test: isolation_tenant_isolation test: isolation_tenant_isolation
test: isolation_tenant_isolation_nonblocking test: isolation_tenant_isolation_nonblocking


@ -35,7 +35,7 @@ ALTER TABLE referencing ADD COLUMN test_1 integer DEFAULT (alter_table_add_colum
ALTER TABLE referencing ADD COLUMN test_2 integer UNIQUE REFERENCES referenced(int_col) ON UPDATE CASCADE ON DELETE SET DEFAULT NOT DEFERRABLE INITIALLY IMMEDIATE; ALTER TABLE referencing ADD COLUMN test_2 integer UNIQUE REFERENCES referenced(int_col) ON UPDATE CASCADE ON DELETE SET DEFAULT NOT DEFERRABLE INITIALLY IMMEDIATE;
ALTER TABLE referencing ADD COLUMN test_3 integer GENERATED ALWAYS AS (test_1 * alter_table_add_column_other_schema.my_random(1)) STORED UNIQUE REFERENCES referenced(int_col) MATCH FULL; ALTER TABLE referencing ADD COLUMN test_3 integer GENERATED ALWAYS AS (test_1 * alter_table_add_column_other_schema.my_random(1)) STORED UNIQUE REFERENCES referenced(int_col) MATCH FULL;
ALTER TABLE referencing ADD COLUMN test_4 integer PRIMARY KEY WITH (fillfactor=70) NOT NULL REFERENCES referenced(int_col) MATCH SIMPLE ON UPDATE CASCADE ON DELETE SET DEFAULT; ALTER TABLE referencing ADD COLUMN test_4 integer PRIMARY KEY WITH (fillfactor=70) NOT NULL REFERENCES referenced(int_col) MATCH SIMPLE ON UPDATE CASCADE ON DELETE SET DEFAULT;
ALTER TABLE referencing ADD COLUMN test_5 integer CONSTRAINT unique_c UNIQUE WITH (fillfactor=50); ALTER TABLE referencing ADD COLUMN test_5 integer CONSTRAINT unique_c UNIQUE WITH (fillfactor=50) NULL;
ALTER TABLE referencing ADD COLUMN test_6 text COMPRESSION pglz COLLATE caseinsensitive NOT NULL; ALTER TABLE referencing ADD COLUMN test_6 text COMPRESSION pglz COLLATE caseinsensitive NOT NULL;
ALTER TABLE referencing ADD COLUMN "test_\'!7" "simple_!\'custom_type"; ALTER TABLE referencing ADD COLUMN "test_\'!7" "simple_!\'custom_type";
-- we give up deparsing ALTER TABLE command if it needs to create a check constraint, and we fallback to legacy behavior -- we give up deparsing ALTER TABLE command if it needs to create a check constraint, and we fallback to legacy behavior


@ -36,6 +36,19 @@ set citus.shard_replication_factor to 2;
select create_distributed_table_concurrently('test','key', 'hash'); select create_distributed_table_concurrently('test','key', 'hash');
ERROR: cannot distribute a table concurrently when citus.shard_replication_factor > 1 ERROR: cannot distribute a table concurrently when citus.shard_replication_factor > 1
set citus.shard_replication_factor to 1; set citus.shard_replication_factor to 1;
set citus.shard_replication_factor to 2;
create table dist_1(a int);
select create_distributed_table('dist_1', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
set citus.shard_replication_factor to 1;
create table dist_2(a int);
select create_distributed_table_concurrently('dist_2', 'a', colocate_with=>'dist_1');
ERROR: cannot create distributed table concurrently because Citus allows concurrent table distribution only when citus.shard_replication_factor = 1
HINT: table dist_2 is requested to be colocated with dist_1 which has citus.shard_replication_factor > 1
begin; begin;
select create_distributed_table_concurrently('test','key'); select create_distributed_table_concurrently('test','key');
ERROR: create_distributed_table_concurrently cannot run inside a transaction block ERROR: create_distributed_table_concurrently cannot run inside a transaction block
@ -138,27 +151,8 @@ select count(*) from test;
rollback; rollback;
-- verify that we can undistribute the table -- verify that we can undistribute the table
begin; begin;
set local client_min_messages to warning;
select undistribute_table('test', cascade_via_foreign_keys := true); select undistribute_table('test', cascade_via_foreign_keys := true);
NOTICE: converting the partitions of create_distributed_table_concurrently.test
NOTICE: creating a new table for create_distributed_table_concurrently.test
NOTICE: dropping the old create_distributed_table_concurrently.test
NOTICE: renaming the new table to create_distributed_table_concurrently.test
NOTICE: creating a new table for create_distributed_table_concurrently.ref
NOTICE: moving the data of create_distributed_table_concurrently.ref
NOTICE: dropping the old create_distributed_table_concurrently.ref
NOTICE: drop cascades to constraint test_id_fkey_1190041 on table create_distributed_table_concurrently.test_1190041
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
SQL statement "DROP TABLE create_distributed_table_concurrently.ref CASCADE"
NOTICE: renaming the new table to create_distributed_table_concurrently.ref
NOTICE: creating a new table for create_distributed_table_concurrently.test_1
NOTICE: moving the data of create_distributed_table_concurrently.test_1
NOTICE: dropping the old create_distributed_table_concurrently.test_1
NOTICE: renaming the new table to create_distributed_table_concurrently.test_1
NOTICE: creating a new table for create_distributed_table_concurrently.test_2
NOTICE: moving the data of create_distributed_table_concurrently.test_2
NOTICE: dropping the old create_distributed_table_concurrently.test_2
NOTICE: renaming the new table to create_distributed_table_concurrently.test_2
undistribute_table undistribute_table
--------------------------------------------------------------------- ---------------------------------------------------------------------


@ -210,6 +210,7 @@ select create_distributed_table('partitioned_tbl_with_fkey','x');
create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31'); create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31');
create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31'); create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31');
create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31');
insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s; insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s;
ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id); ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id);
WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'partitioned_tbl_with_fkey'::regclass ORDER BY shardid LIMIT 1) WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'partitioned_tbl_with_fkey'::regclass ORDER BY shardid LIMIT 1)


@ -1,42 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s1-register s2-lock s1-lock s2-wrong-cancel-1 s2-wrong-cancel-2 s2-cancel
step s1-register:
INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
step s2-lock:
BEGIN;
LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
step s1-lock:
BEGIN;
LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
END;
<waiting ...>
step s2-wrong-cancel-1:
SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
run_pg_send_cancellation
---------------------------------------------------------------------
(1 row)
step s2-wrong-cancel-2:
SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
run_pg_send_cancellation
---------------------------------------------------------------------
(1 row)
step s2-cancel:
SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
END;
run_pg_send_cancellation
---------------------------------------------------------------------
(1 row)
step s1-lock: <... completed>
ERROR: canceling statement due to user request


@ -1349,6 +1349,77 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.p
(schema,{test_schema_for_sequence_propagation},{}) (schema,{test_schema_for_sequence_propagation},{})
(1 row) (1 row)
-- Bug: https://github.com/citusdata/citus/issues/7378
-- Create a reference table
CREATE TABLE tbl_ref_mats(row_id integer primary key);
INSERT INTO tbl_ref_mats VALUES (1), (2);
SELECT create_reference_table('tbl_ref_mats');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_alter_table_statements.tbl_ref_mats$$)
create_reference_table
---------------------------------------------------------------------
(1 row)
-- Create a distributed table
CREATE TABLE tbl_dist_mats(series_id integer);
INSERT INTO tbl_dist_mats VALUES (1), (1), (2), (2);
SELECT create_distributed_table('tbl_dist_mats', 'series_id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_alter_table_statements.tbl_dist_mats$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- Create a view that joins the distributed table with the reference table on the distribution key.
CREATE VIEW vw_citus_views as
SELECT d.series_id FROM tbl_dist_mats d JOIN tbl_ref_mats r ON d.series_id = r.row_id;
-- The view initially works fine
SELECT * FROM vw_citus_views ORDER BY 1;
series_id
---------------------------------------------------------------------
1
1
2
2
(4 rows)
-- Now, alter the table
ALTER TABLE tbl_ref_mats ADD COLUMN category1 varchar(50);
SELECT * FROM vw_citus_views ORDER BY 1;
series_id
---------------------------------------------------------------------
1
1
2
2
(4 rows)
ALTER TABLE tbl_ref_mats ADD COLUMN category2 varchar(50);
SELECT * FROM vw_citus_views ORDER BY 1;
series_id
---------------------------------------------------------------------
1
1
2
2
(4 rows)
ALTER TABLE tbl_ref_mats DROP COLUMN category1;
SELECT * FROM vw_citus_views ORDER BY 1;
series_id
---------------------------------------------------------------------
1
1
2
2
(4 rows)
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
DROP SCHEMA test_schema_for_sequence_propagation CASCADE; DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
DROP TABLE table_without_sequence; DROP TABLE table_without_sequence;


@ -1331,7 +1331,23 @@ SELECT * FROM multi_extension.print_extension_changes();
| view citus_stat_tenants_local | view citus_stat_tenants_local
(11 rows) (11 rows)
-- Test downgrade to 11.3-1 from 12.0-1 -- Test downgrade to 11.3-1 from 11.3-2
ALTER EXTENSION citus UPDATE TO '11.3-2';
ALTER EXTENSION citus UPDATE TO '11.3-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
(0 rows)
-- Snapshot of state at 11.3-2
ALTER EXTENSION citus UPDATE TO '11.3-2';
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
(0 rows)
-- Test downgrade to 11.3-2 from 12.0-1
ALTER EXTENSION citus UPDATE TO '12.0-1'; ALTER EXTENSION citus UPDATE TO '12.0-1';
CREATE TABLE null_shard_key (x int, y int); CREATE TABLE null_shard_key (x int, y int);
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -1341,15 +1357,15 @@ SELECT create_distributed_table('null_shard_key', null);
(1 row) (1 row)
-- Show that we cannot downgrade to 11.3-1 becuase the cluster has a -- Show that we cannot downgrade to 11.3-2 becuase the cluster has a
-- distributed table with single-shard. -- distributed table with single-shard.
ALTER EXTENSION citus UPDATE TO '11.3-1'; ALTER EXTENSION citus UPDATE TO '11.3-2';
ERROR: cannot downgrade Citus because there are distributed tables without a shard key. ERROR: cannot downgrade Citus because there are distributed tables without a shard key.
DETAIL: To downgrade Citus to an older version, you should first convert those tables to Postgres tables by executing SELECT undistribute_table("%s"). DETAIL: To downgrade Citus to an older version, you should first convert those tables to Postgres tables by executing SELECT undistribute_table("%s").
HINT: You can find the distributed tables without a shard key in the cluster by using the following query: "SELECT * FROM citus_tables WHERE distribution_column = '<none>' AND colocation_id > 0". HINT: You can find the distributed tables without a shard key in the cluster by using the following query: "SELECT * FROM citus_tables WHERE distribution_column = '<none>' AND colocation_id > 0".
CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE
DROP TABLE null_shard_key; DROP TABLE null_shard_key;
ALTER EXTENSION citus UPDATE TO '11.3-1'; ALTER EXTENSION citus UPDATE TO '11.3-2';
-- Should be empty result since upgrade+downgrade should be a no-op -- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes(); SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object previous_object | current_object
@ -1376,7 +1392,7 @@ DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
SHOW citus.version; SHOW citus.version;
citus.version citus.version
--------------------------------------------------------------------- ---------------------------------------------------------------------
12.0devel 12.0.1
(1 row) (1 row)
-- ensure no unexpected objects were created outside pg_catalog -- ensure no unexpected objects were created outside pg_catalog


@ -18,21 +18,21 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) pg14.t1_980001 NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST) t1; VACUUM (FULL, PROCESS_TOAST) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000 NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001 NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST true) t1; VACUUM (FULL, PROCESS_TOAST true) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000 NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001 NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST false) t1; VACUUM (FULL, PROCESS_TOAST false) t1;
ERROR: PROCESS_TOAST required with VACUUM FULL ERROR: PROCESS_TOAST required with VACUUM FULL
VACUUM (PROCESS_TOAST false) t1; VACUUM (PROCESS_TOAST false) t1;
NOTICE: issuing VACUUM pg14.t1_980000 NOTICE: issuing VACUUM (PROCESS_TOAST FALSE) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM pg14.t1_980001 NOTICE: issuing VACUUM (PROCESS_TOAST FALSE) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP AUTO) t1; VACUUM (INDEX_CLEANUP AUTO) t1;
NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000 NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000
@ -62,14 +62,14 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP "AUTOX") t1; VACUUM (INDEX_CLEANUP "AUTOX") t1;
ERROR: index_cleanup requires a Boolean value ERROR: index_cleanup requires a Boolean value
VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1; VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1;
NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980000 NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980001 NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1; VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000 NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001 NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- vacuum (process_toast true) should be vacuuming toast tables (default is true) -- vacuum (process_toast true) should be vacuuming toast tables (default is true)
CREATE TABLE local_vacuum_table(name text); CREATE TABLE local_vacuum_table(name text);


@ -1,65 +0,0 @@
setup
{
CREATE FUNCTION run_pg_send_cancellation(int,int)
RETURNS void
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION get_cancellation_key()
RETURNS int
AS 'citus'
LANGUAGE C STRICT;
CREATE TABLE cancel_table (pid int, cancel_key int);
}
teardown
{
DROP TABLE IF EXISTS cancel_table;
}
session "s1"
/* store the PID and cancellation key of session 1 */
step "s1-register"
{
INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
}
/* lock the table from session 1, will block and get cancelled */
step "s1-lock"
{
BEGIN;
LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
END;
}
session "s2"
/* lock the table from session 2 to block session 1 */
step "s2-lock"
{
BEGIN;
LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
}
/* PID mismatch */
step "s2-wrong-cancel-1"
{
SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
}
/* cancellation key mismatch */
step "s2-wrong-cancel-2"
{
SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
}
/* cancel the LOCK statement in session 1 */
step "s2-cancel"
{
SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
END;
}
permutation "s1-register" "s2-lock" "s1-lock" "s2-wrong-cancel-1" "s2-wrong-cancel-2" "s2-cancel"


@ -33,7 +33,7 @@ ALTER TABLE referencing ADD COLUMN test_1 integer DEFAULT (alter_table_add_colum
ALTER TABLE referencing ADD COLUMN test_2 integer UNIQUE REFERENCES referenced(int_col) ON UPDATE CASCADE ON DELETE SET DEFAULT NOT DEFERRABLE INITIALLY IMMEDIATE;
ALTER TABLE referencing ADD COLUMN test_3 integer GENERATED ALWAYS AS (test_1 * alter_table_add_column_other_schema.my_random(1)) STORED UNIQUE REFERENCES referenced(int_col) MATCH FULL;
ALTER TABLE referencing ADD COLUMN test_4 integer PRIMARY KEY WITH (fillfactor=70) NOT NULL REFERENCES referenced(int_col) MATCH SIMPLE ON UPDATE CASCADE ON DELETE SET DEFAULT;
-ALTER TABLE referencing ADD COLUMN test_5 integer CONSTRAINT unique_c UNIQUE WITH (fillfactor=50);
+ALTER TABLE referencing ADD COLUMN test_5 integer CONSTRAINT unique_c UNIQUE WITH (fillfactor=50) NULL;
ALTER TABLE referencing ADD COLUMN test_6 text COMPRESSION pglz COLLATE caseinsensitive NOT NULL;
ALTER TABLE referencing ADD COLUMN "test_\'!7" "simple_!\'custom_type";


@ -28,6 +28,14 @@ set citus.shard_replication_factor to 2;
select create_distributed_table_concurrently('test','key', 'hash');
set citus.shard_replication_factor to 1;
set citus.shard_replication_factor to 2;
create table dist_1(a int);
select create_distributed_table('dist_1', 'a');
set citus.shard_replication_factor to 1;
create table dist_2(a int);
select create_distributed_table_concurrently('dist_2', 'a', colocate_with=>'dist_1');
begin;
select create_distributed_table_concurrently('test','key');
rollback;
@ -63,6 +71,7 @@ rollback;
-- verify that we can undistribute the table
begin;
set local client_min_messages to warning;
select undistribute_table('test', cascade_via_foreign_keys := true);
rollback;
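
The added lines above check that create_distributed_table_concurrently refuses replicated tables, whether citus.shard_replication_factor is 2 directly or the colocate_with target is itself replicated. A minimal sketch of the supported path (table name is illustrative):
-- concurrent distribution requires replication factor 1
SET citus.shard_replication_factor TO 1;
CREATE TABLE events (key int);
SELECT create_distributed_table_concurrently('events', 'key');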


@ -84,6 +84,7 @@ create table partitioned_tbl_with_fkey (x int, y int, t timestamptz default now(
select create_distributed_table('partitioned_tbl_with_fkey','x');
create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31');
create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31');
create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31');
insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s;
ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id);
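
The extra 2024 partition only widens the data range for the foreign key test. For reference, a hedged sketch of the attach-style alternative on the same parent (partition name and range are invented, and this assumes attaching is handled the same way as CREATE TABLE ... PARTITION OF here):
-- create the table separately, then attach it as another partition
CREATE TABLE partition_4_with_fkey (LIKE partitioned_tbl_with_fkey INCLUDING DEFAULTS);
ALTER TABLE partitioned_tbl_with_fkey
    ATTACH PARTITION partition_4_with_fkey
    FOR VALUES FROM ('2025-01-01') TO ('2025-12-31');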


@ -727,6 +727,32 @@ ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_sch
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass);
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace);
-- Bug: https://github.com/citusdata/citus/issues/7378
-- Create a reference table
CREATE TABLE tbl_ref_mats(row_id integer primary key);
INSERT INTO tbl_ref_mats VALUES (1), (2);
SELECT create_reference_table('tbl_ref_mats');
-- Create a distributed table
CREATE TABLE tbl_dist_mats(series_id integer);
INSERT INTO tbl_dist_mats VALUES (1), (1), (2), (2);
SELECT create_distributed_table('tbl_dist_mats', 'series_id');
-- Create a view that joins the distributed table with the reference table on the distribution key.
CREATE VIEW vw_citus_views as
SELECT d.series_id FROM tbl_dist_mats d JOIN tbl_ref_mats r ON d.series_id = r.row_id;
-- The view initially works fine
SELECT * FROM vw_citus_views ORDER BY 1;
-- Now, alter the table
ALTER TABLE tbl_ref_mats ADD COLUMN category1 varchar(50);
SELECT * FROM vw_citus_views ORDER BY 1;
ALTER TABLE tbl_ref_mats ADD COLUMN category2 varchar(50);
SELECT * FROM vw_citus_views ORDER BY 1;
ALTER TABLE tbl_ref_mats DROP COLUMN category1;
SELECT * FROM vw_citus_views ORDER BY 1;
SET client_min_messages TO WARNING;
DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
DROP TABLE table_without_sequence;
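
The block above is the regression repro for issue #7378: after ALTER TABLE ADD/DROP COLUMN on the reference table, selecting from the joining view used to fail with an incorrect column count. A purely illustrative catalog check that can be run between the ALTER statements (not part of the test):
-- eyeball the column counts the catalog reports after each ALTER
SELECT relname, relnatts
FROM pg_class
WHERE relname IN ('tbl_ref_mats', 'tbl_dist_mats')
ORDER BY relname;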


@ -591,20 +591,30 @@ SELECT * FROM multi_extension.print_extension_changes();
ALTER EXTENSION citus UPDATE TO '11.3-1';
SELECT * FROM multi_extension.print_extension_changes();
--- Test downgrade to 11.3-1 from 12.0-1
+-- Test downgrade to 11.3-1 from 11.3-2
ALTER EXTENSION citus UPDATE TO '11.3-2';
ALTER EXTENSION citus UPDATE TO '11.3-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
-- Snapshot of state at 11.3-2
ALTER EXTENSION citus UPDATE TO '11.3-2';
SELECT * FROM multi_extension.print_extension_changes();
-- Test downgrade to 11.3-2 from 12.0-1
ALTER EXTENSION citus UPDATE TO '12.0-1';
CREATE TABLE null_shard_key (x int, y int);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('null_shard_key', null);
--- Show that we cannot downgrade to 11.3-1 becuase the cluster has a
+-- Show that we cannot downgrade to 11.3-2 becuase the cluster has a
-- distributed table with single-shard.
-ALTER EXTENSION citus UPDATE TO '11.3-1';
+ALTER EXTENSION citus UPDATE TO '11.3-2';
DROP TABLE null_shard_key;
-ALTER EXTENSION citus UPDATE TO '11.3-1';
+ALTER EXTENSION citus UPDATE TO '11.3-2';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
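
The upgrade/downgrade steps above bounce between schema versions; when running them by hand it helps to confirm which version is actually installed. A standard catalog query for that (not part of the test):
-- currently installed version vs. the default version available on disk
SELECT extname, extversion FROM pg_extension WHERE extname = 'citus';
SELECT name, default_version FROM pg_available_extensions WHERE name = 'citus';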