diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 376c44331..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,1128 +0,0 @@ -version: 2.1 -orbs: - codecov: codecov/codecov@1.1.1 - azure-cli: circleci/azure-cli@1.0.0 - -parameters: - image_suffix: - type: string - default: '-v9d71045' - pg14_version: - type: string - default: '14.9' - pg15_version: - type: string - default: '15.4' - pg16_version: - type: string - default: '16.0' - upgrade_pg_versions: - type: string - default: '14.9-15.4-16.0' - style_checker_tools_version: - type: string - default: '0.8.18' - flaky_test: - type: string - default: '' - flaky_test_runs_per_job: - type: integer - default: 50 - skip_flaky_tests: - type: boolean - default: false - -commands: - install_extension: - parameters: - pg_major: - description: 'postgres major version to use' - type: integer - steps: - - run: - name: 'Install Extension' - command: | - tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-<< parameters.pg_major >>.tar" --directory / - - configure: - steps: - - run: - name: 'Configure' - command: | - chown -R circleci . - gosu circleci ./configure --without-pg-version-check - - enable_core: - steps: - - run: - name: 'Enable core dumps' - command: | - ulimit -c unlimited - - save_regressions: - steps: - - run: - name: 'Regressions' - command: | - if [ -f "src/test/regress/regression.diffs" ]; then - cat src/test/regress/regression.diffs - exit 1 - fi - when: on_fail - - store_artifacts: - name: 'Save regressions' - path: src/test/regress/regression.diffs - save_logs_and_results: - steps: - - store_artifacts: - name: 'Save mitmproxy output (failure test specific)' - path: src/test/regress/proxy.output - - store_artifacts: - name: 'Save results' - path: src/test/regress/results/ - - store_artifacts: - name: 'Save coordinator log' - path: src/test/regress/tmp_check/master/log - - store_artifacts: - name: 'Save worker1 log' - path: src/test/regress/tmp_check/worker.57637/log - - store_artifacts: - name: 'Save worker2 log' - path: src/test/regress/tmp_check/worker.57638/log - - stack_trace: - steps: - - run: - name: 'Print stack traces' - command: | - ./ci/print_stack_trace.sh - when: on_fail - - coverage: - parameters: - flags: - description: 'codecov flags' - type: string - steps: - - codecov/upload: - flags: '<< parameters.flags >>' - - run: - name: 'Create codeclimate coverage' - command: | - lcov --directory . --capture --output-file lcov.info - lcov --remove lcov.info -o lcov.info '/usr/*' - sed "s=^SF:$PWD/=SF:=g" -i lcov.info # relative paths are required by codeclimate - mkdir -p /tmp/codeclimate - # We started getting permission errors. This fixes them, and since - # we're not on a multi-user system, this is safe to do.
- git config --global --add safe.directory /home/circleci/project - cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/$CIRCLE_JOB.json lcov.info - - persist_to_workspace: - root: /tmp - paths: - - codeclimate/*.json - -jobs: - build: - description: Build the citus extension - parameters: - pg_major: - description: postgres major version to build citus for - type: integer - image: - description: docker image to use for the build - type: string - default: citus/extbuilder - image_tag: - description: tag to use for the docker image - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - steps: - - checkout - - run: - name: 'Configure, Build, and Install' - command: | - ./ci/build-citus.sh - - persist_to_workspace: - root: . - paths: - - build-<< parameters.pg_major >>/* - - install-<< parameters.pg_major >>.tar - - check-style: - docker: - - image: 'citus/stylechecker:<< pipeline.parameters.style_checker_tools_version >><< pipeline.parameters.image_suffix >>' - steps: - - checkout - - run: - name: 'Check C Style' - command: citus_indent --check - - run: - name: 'Check Python style' - command: black --check . - - run: - name: 'Check Python import order' - command: isort --check . - - run: - name: 'Check Python lints' - command: flake8 . - - run: - name: 'Fix whitespace' - command: ci/editorconfig.sh && git diff --exit-code - - run: - name: 'Remove useless declarations' - command: ci/remove_useless_declarations.sh && git diff --cached --exit-code - - run: - name: 'Normalize test output' - command: ci/normalize_expected.sh && git diff --exit-code - - run: - name: 'Check for C-style comments in migration files' - command: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code - - run: - name: 'Check for comments that start with # character in spec files' - command: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code - - run: - name: 'Check for gitignore entries for source files' - command: ci/fix_gitignore.sh && git diff --exit-code - - run: - name: 'Check for lengths of changelog entries' - command: ci/disallow_long_changelog_entries.sh - - run: - name: 'Check for banned C API usage' - command: ci/banned.h.sh - - run: - name: 'Check for tests missing in schedules' - command: ci/check_all_tests_are_run.sh - - run: - name: 'Check if all CI scripts are actually run' - command: ci/check_all_ci_scripts_are_run.sh - - run: - name: 'Check if all GUCs are sorted alphabetically' - command: ci/check_gucs_are_alphabetically_sorted.sh - - run: - name: 'Check for missing downgrade scripts' - command: ci/check_migration_files.sh - - check-sql-snapshots: - docker: - - image: 'citus/extbuilder:latest' - steps: - - checkout - - run: - name: 'Check Snapshots' - command: ci/check_sql_snapshots.sh - - test-pg-upgrade: - description: Runs postgres upgrade tests - parameters: - old_pg_major: - description: 'postgres major version to use before the upgrade' - type: integer - new_pg_major: - description: 'postgres major version to upgrade to' - type: integer - image: - description: 'docker image to use for the tests' - type: string - default: citus/pgupgradetester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: .
- - install_extension: - pg_major: << parameters.old_pg_major >> - - install_extension: - pg_major: << parameters.new_pg_major >> - - configure - - enable_core - - run: - name: 'Install and test postgres upgrade' - command: | - gosu circleci \ - make -C src/test/regress \ - check-pg-upgrade \ - old-bindir=/usr/lib/postgresql/<< parameters.old_pg_major >>/bin \ - new-bindir=/usr/lib/postgresql/<< parameters.new_pg_major >>/bin - no_output_timeout: 2m - - run: - name: 'Copy pg_upgrade logs for newData dir' - command: | - mkdir -p /tmp/pg_upgrade_newData_logs - if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then - cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs - fi - when: on_fail - - store_artifacts: - name: 'Save pg_upgrade logs for newData dir' - path: /tmp/pg_upgrade_newData_logs - - save_logs_and_results - - save_regressions - - stack_trace - - coverage: - flags: 'test_<< parameters.old_pg_major >>_<< parameters.new_pg_major >>,upgrade' - - test-pytest: - description: Runs pytest based tests - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use for the tests' - type: string - default: citus/failtester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: . - - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run pytest' - command: | - gosu circleci \ - make -C src/test/regress check-pytest - no_output_timeout: 2m - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,pytest' - - - test-arbitrary-configs: - description: Runs tests on arbitrary configs - parallelism: 6 - parameters: - pg_major: - description: 'postgres major version to use' - type: integer - image: - description: 'docker image to use for the tests' - type: string - default: citus/failtester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - resource_class: xlarge - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: .
- - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Test arbitrary configs' - command: | - TESTS=$(src/test/regress/citus_tests/print_test_names.py | circleci tests split) - # Our test suite expects comma-separated values - TESTS=$(echo $TESTS | tr ' ' ',') - # TESTS will contain the subset of configs that will be run on this container; we use multiple containers - # to run the test suite - gosu circleci \ - make -C src/test/regress \ - check-arbitrary-configs parallel=4 CONFIGS=$TESTS - no_output_timeout: 2m - - run: - name: 'Show regressions' - command: | - find src/test/regress/tmp_citus_test/ -name "regression*.diffs" -exec cat {} + - lines=$(find src/test/regress/tmp_citus_test/ -name "regression*.diffs" | wc -l) - if [ $lines -ne 0 ]; then - exit 1 - fi - - when: on_fail - - run: - name: 'Copy logfiles' - command: | - mkdir src/test/regress/tmp_citus_test/logfiles - find src/test/regress/tmp_citus_test/ -name "logfile_*" -exec cp -t src/test/regress/tmp_citus_test/logfiles/ {} + - when: on_fail - - store_artifacts: - name: 'Save logfiles' - path: src/test/regress/tmp_citus_test/logfiles - - save_logs_and_results - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,upgrade' - - test-citus-upgrade: - description: Runs citus upgrade tests - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use for the tests' - type: string - default: citus/citusupgradetester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: .
- - configure - - enable_core - - run: - name: 'Install and test citus upgrade' - command: | - # run make check-citus-upgrade for all citus versions - # the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of - for citus_version in ${CITUS_VERSIONS}; do \ - gosu circleci \ - make -C src/test/regress \ - check-citus-upgrade \ - bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \ - citus-old-version=${citus_version} \ - citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \ - citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \ - done; - - # run make check-citus-upgrade-mixed for all citus versions - # the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of - for citus_version in ${CITUS_VERSIONS}; do \ - gosu circleci \ - make -C src/test/regress \ - check-citus-upgrade-mixed \ - citus-old-version=${citus_version} \ - bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \ - citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \ - citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \ - done; - no_output_timeout: 2m - - save_logs_and_results - - save_regressions - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,upgrade' - - test-query-generator: - description: Expects that generated queries run on distributed and local tables have the same results - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use for the tests' - type: string - default: citus/failtester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: .
- - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run Test' - command: | - gosu circleci make -C src/test/regress check-query-generator - no_output_timeout: 5m - - run: - name: 'Show regressions' - command: | - find src/test/regress/citus_tests/query_generator/out/ -name "local_dist.diffs" -exec cat {} + - lines=$(find src/test/regress/citus_tests/query_generator/out/ -name "local_dist.diffs" | wc -l) - if [ $lines -ne 0 ]; then - exit 1 - fi - when: on_fail - - run: - name: 'Copy logfiles' - command: | - mkdir src/test/regress/tmp_citus_test/logfiles - find src/test/regress/tmp_citus_test/ -name "logfile_*" -exec cp -t src/test/regress/tmp_citus_test/logfiles/ {} + - when: on_fail - - store_artifacts: - name: 'Save logfiles' - path: src/test/regress/tmp_citus_test/logfiles - - store_artifacts: - name: 'Save ddls' - path: src/test/regress/citus_tests/query_generator/out/ddls.sql - - store_artifacts: - name: 'Save dmls' - path: src/test/regress/citus_tests/query_generator/out/queries.sql - - store_artifacts: - name: 'Save diffs' - path: src/test/regress/citus_tests/query_generator/out/local_dist.diffs - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,querygen' - - test-citus: - description: Runs the common tests of citus - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use for the tests' - type: string - default: citus/exttester - image_tag: - description: 'docker image tag to use' - type: string - make: - description: 'make target' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: . - - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run Test' - command: | - gosu circleci make -C src/test/regress << parameters.make >> - no_output_timeout: 2m - - save_logs_and_results - - save_regressions - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,<< parameters.make >>' - - tap-test-citus: - description: Runs tap tests for citus - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use for the tests' - type: string - default: citus/exttester - image_tag: - description: 'docker image tag to use' - type: string - suite: - description: 'name of the tap test suite to run' - type: string - make: - description: 'make target' - type: string - default: installcheck - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: .
- - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run Test' - command: | - gosu circleci make -C src/test/<< parameters.suite >> << parameters.make >> - no_output_timeout: 2m - - store_artifacts: - name: 'Save tap logs' - path: /home/circleci/project/src/test/<< parameters.suite >>/tmp_check/log - - save_logs_and_results - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,tap_<< parameters.suite >>_<< parameters.make >>' - - check-merge-to-enterprise: - docker: - - image: citus/extbuilder:<< pipeline.parameters.pg14_version >> - working_directory: /home/circleci/project - steps: - - checkout - - run: - command: | - ci/check_enterprise_merge.sh - - ch_benchmark: - docker: - - image: buildpack-deps:stretch - working_directory: /home/circleci/project - steps: - - checkout - - azure-cli/install - - azure-cli/login-with-service-principal - - run: - command: | - cd ./src/test/hammerdb - sh run_hammerdb.sh citusbot_ch_benchmark_rg - name: install dependencies and run ch_benchmark tests - no_output_timeout: 20m - - tpcc_benchmark: - docker: - - image: buildpack-deps:stretch - working_directory: /home/circleci/project - steps: - - checkout - - azure-cli/install - - azure-cli/login-with-service-principal - - run: - command: | - cd ./src/test/hammerdb - sh run_hammerdb.sh citusbot_tpcc_benchmark_rg - name: install dependencies and run tpcc_benchmark tests - no_output_timeout: 20m - - test-flakyness: - description: Runs a test multiple times to see if it's flaky - parallelism: 32 - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use for the tests' - type: string - default: citus/failtester - image_tag: - description: 'docker image tag to use' - type: string - test: - description: 'the test file path that should be run multiple times' - type: string - default: '' - runs: - description: 'number of times that the test should be run in total' - type: integer - default: 8 - skip: - description: 'A flag to bypass flaky test detection.' - type: boolean - default: false - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - resource_class: small - steps: - - checkout - - attach_workspace: - at: . - - run: - name: 'Detect regression tests that need to be run' - command: | - skip=<< parameters.skip >> - if [ "$skip" = true ]; then - echo "Skipping flaky test detection." - circleci-agent step halt - fi - - testForDebugging="<< parameters.test >>" - - if [ -z "$testForDebugging" ]; then - detected_changes=$(git diff origin/main... --name-only --diff-filter=AM | (grep 'src/test/regress/sql/.*\.sql\|src/test/regress/spec/.*\.spec\|src/test/regress/citus_tests/test/test_.*\.py' || true)) - tests=${detected_changes} - else - tests=$testForDebugging; - fi - - if [ -z "$tests" ]; then - echo "No test found."
- circleci-agent step halt - else - echo "Detected tests " $tests - fi - - echo export tests=\""$tests"\" >> "$BASH_ENV" - source "$BASH_ENV" - - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run minimal tests' - command: | - tests_array=($tests) - for test in "${tests_array[@]}" - do - test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/") - gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat << parameters.runs >> --use-base-schedule --use-whole-schedule-line - done - no_output_timeout: 2m - - save_logs_and_results - - save_regressions - - stack_trace - - upload-coverage: - docker: - - image: 'citus/exttester:<< pipeline.parameters.pg15_version >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - attach_workspace: - at: . - - run: - name: Upload coverage results to Code Climate - command: | - cc-test-reporter sum-coverage codeclimate/*.json -o total.json - cc-test-reporter upload-coverage -i total.json - -workflows: - version: 2 - flaky_test_debugging: - jobs: - - build: - name: build-flaky-15 - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - - - test-flakyness: - name: 'test-15_flaky' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-flaky-15] - test: '<< pipeline.parameters.flaky_test >>' - runs: << pipeline.parameters.flaky_test_runs_per_job >> - - build_and_test: - jobs: - - build: - name: build-14 - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - - build: - name: build-15 - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - - build: - name: build-16 - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - - - check-style - - check-sql-snapshots - - - test-citus: &test-citus-14 - name: 'test-14_check-split' - make: check-split - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise' - make: check-enterprise - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-isolation' - make: check-enterprise-isolation - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-isolation-logicalrep-1' - make: check-enterprise-isolation-logicalrep-1 - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-isolation-logicalrep-2' - make: check-enterprise-isolation-logicalrep-2 - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-isolation-logicalrep-3' - make: check-enterprise-isolation-logicalrep-3 - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-failure' - image: citus/failtester - make: check-enterprise-failure - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-multi' - make: check-multi - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-multi-1' - make: check-multi-1 - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-mx' - make: check-multi-mx - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-vanilla' - make: check-vanilla - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-isolation' - make: check-isolation - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-operations' - make: check-operations - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-follower-cluster' - make: check-follower-cluster - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-columnar' - make: check-columnar - - test-citus: - <<: 
*test-citus-14 - name: 'test-14_check-columnar-isolation' - make: check-columnar-isolation - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-failure' - image: citus/failtester - make: check-failure - - - test-citus: &test-citus-15 - name: 'test-15_check-split' - make: check-split - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise' - make: check-enterprise - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-isolation' - make: check-enterprise-isolation - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-isolation-logicalrep-1' - make: check-enterprise-isolation-logicalrep-1 - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-isolation-logicalrep-2' - make: check-enterprise-isolation-logicalrep-2 - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-isolation-logicalrep-3' - make: check-enterprise-isolation-logicalrep-3 - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-failure' - image: citus/failtester - make: check-enterprise-failure - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-multi' - make: check-multi - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-multi-1' - make: check-multi-1 - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-mx' - make: check-multi-mx - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-vanilla' - make: check-vanilla - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-isolation' - make: check-isolation - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-operations' - make: check-operations - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-follower-cluster' - make: check-follower-cluster - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-columnar' - make: check-columnar - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-columnar-isolation' - make: check-columnar-isolation - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-failure' - image: citus/failtester - make: check-failure - - - test-citus: &test-citus-16 - name: 'test-16_check-split' - make: check-split - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise' - make: check-enterprise - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-isolation' - make: check-enterprise-isolation - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-isolation-logicalrep-1' - make: check-enterprise-isolation-logicalrep-1 - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-isolation-logicalrep-2' - make: check-enterprise-isolation-logicalrep-2 - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-isolation-logicalrep-3' - make: check-enterprise-isolation-logicalrep-3 - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-failure' - image: citus/failtester - make: check-enterprise-failure - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-multi' - make: check-multi - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-multi-1' - make: check-multi-1 - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-mx' - make: check-multi-mx - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-vanilla' - make: check-vanilla - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-isolation' - make: check-isolation - 
- test-citus: - <<: *test-citus-16 - name: 'test-16_check-operations' - make: check-operations - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-follower-cluster' - make: check-follower-cluster - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-columnar' - make: check-columnar - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-columnar-isolation' - make: check-columnar-isolation - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-failure' - image: citus/failtester - make: check-failure - - - test-pytest: - name: 'test-14_pytest' - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - - test-pytest: - name: 'test-15_pytest' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - - test-pytest: - name: 'test-16_pytest' - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - - tap-test-citus: - name: 'test-15_tap-cdc' - suite: cdc - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - - tap-test-citus: - name: 'test-16_tap-cdc' - suite: cdc - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - - test-arbitrary-configs: - name: 'test-14_check-arbitrary-configs' - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - - test-arbitrary-configs: - name: 'test-15_check-arbitrary-configs' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - - test-arbitrary-configs: - name: 'test-16_check-arbitrary-configs' - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - - test-query-generator: - name: 'test-14_check-query-generator' - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - - test-query-generator: - name: 'test-15_check-query-generator' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - - test-query-generator: - name: 'test-16_check-query-generator' - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - - test-pg-upgrade: - name: 'test-14-15_check-pg-upgrade' - old_pg_major: 14 - new_pg_major: 15 - image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' - requires: [build-14, build-15] - - - test-pg-upgrade: - name: 'test-15-16_check-pg-upgrade' - old_pg_major: 15 - new_pg_major: 16 - image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' - requires: [build-15, build-16] - - - test-pg-upgrade: - name: 'test-14-16_check-pg-upgrade' - old_pg_major: 14 - new_pg_major: 16 - image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' - requires: [build-14, build-16] - - - test-citus-upgrade: - name: test-14_check-citus-upgrade - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - - upload-coverage: - requires: - - test-14_check-multi - - test-14_check-multi-1 - - test-14_check-mx - - test-14_check-vanilla - - test-14_check-isolation - - test-14_check-operations - - test-14_check-follower-cluster - - test-14_check-columnar - - test-14_check-columnar-isolation - - test-14_check-failure - - test-14_check-enterprise - - test-14_check-enterprise-isolation - - test-14_check-enterprise-isolation-logicalrep-1 - - test-14_check-enterprise-isolation-logicalrep-2 - - test-14_check-enterprise-isolation-logicalrep-3 - - test-14_check-enterprise-failure - - test-14_check-split - - 
test-14_check-arbitrary-configs - - test-14_check-query-generator - - test-15_check-multi - - test-15_check-multi-1 - - test-15_check-mx - - test-15_check-vanilla - - test-15_check-isolation - - test-15_check-operations - - test-15_check-follower-cluster - - test-15_check-columnar - - test-15_check-columnar-isolation - - test-15_check-failure - - test-15_check-enterprise - - test-15_check-enterprise-isolation - - test-15_check-enterprise-isolation-logicalrep-1 - - test-15_check-enterprise-isolation-logicalrep-2 - - test-15_check-enterprise-isolation-logicalrep-3 - - test-15_check-enterprise-failure - - test-15_check-split - - test-15_check-arbitrary-configs - - test-15_check-query-generator - - test-16_check-multi - - test-16_check-multi-1 - - test-16_check-mx - - test-16_check-vanilla - - test-16_check-isolation - - test-16_check-operations - - test-16_check-follower-cluster - - test-16_check-columnar - - test-16_check-columnar-isolation - - test-16_check-failure - - test-16_check-enterprise - - test-16_check-enterprise-isolation - - test-16_check-enterprise-isolation-logicalrep-1 - - test-16_check-enterprise-isolation-logicalrep-2 - - test-16_check-enterprise-isolation-logicalrep-3 - - test-16_check-enterprise-failure - - test-16_check-split - - test-16_check-arbitrary-configs - - test-16_check-query-generator - - test-14-15_check-pg-upgrade - - test-15-16_check-pg-upgrade - - test-14-16_check-pg-upgrade - - test-14_check-citus-upgrade - - - ch_benchmark: - requires: [build-14] - filters: - branches: - only: - - /ch_benchmark\/.*/ # match with ch_benchmark/ prefix - - tpcc_benchmark: - requires: [build-14] - filters: - branches: - only: - - /tpcc_benchmark\/.*/ # match with tpcc_benchmark/ prefix - - test-flakyness: - name: 'test-15_flaky' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - skip: << pipeline.parameters.skip_flaky_tests >> diff --git a/.devcontainer/src/test/regress/Pipfile.lock b/.devcontainer/src/test/regress/Pipfile.lock index 15cb7ecda..bdb42a1c3 100644 --- a/.devcontainer/src/test/regress/Pipfile.lock +++ b/.devcontainer/src/test/regress/Pipfile.lock @@ -127,72 +127,61 @@ }, "cffi": { "hashes": [ - "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5", - "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef", - "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104", - "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426", - "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405", - "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375", - "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a", - "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e", - "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc", - "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf", - "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185", - "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497", - "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3", - "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35", - "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c", - "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83", - "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21", - 
"sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca", - "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984", - "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac", - "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd", - "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee", - "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a", - "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2", - "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192", - "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7", - "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585", - "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f", - "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e", - "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27", - "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b", - "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e", - "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e", - "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d", - "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c", - "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415", - "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82", - "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02", - "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314", - "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325", - "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c", - "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3", - "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914", - "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045", - "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d", - "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9", - "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5", - "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2", - "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c", - "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3", - "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2", - "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8", - "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d", - "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d", - "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9", - "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162", - "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76", - "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4", - "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e", - "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9", - "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6", - "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b", - "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01", - 
"sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0" + "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", + "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", + "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", + "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", + "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", + "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", + "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", + "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", + "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", + "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", + "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", + "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", + "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", + "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", + "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", + "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", + "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", + "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", + "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", + "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", + "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", + "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", + "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", + "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", + "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", + "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", + "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", + "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", + "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", + "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", + "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", + "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", + "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", + "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", + "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", + "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", + "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", + "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", + "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", + "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", + "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", + "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", + "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", + "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", + "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", + 
"sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", + "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", + "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", + "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", + "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", + "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", + "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" ], - "version": "==1.15.1" + "markers": "python_version >= '3.8'", + "version": "==1.16.0" }, "click": { "hashes": [ @@ -420,78 +409,78 @@ "mitmproxy": { "editable": true, "git": "https://github.com/citusdata/mitmproxy.git", - "markers": "python_version >= '3.10'", + "markers": "python_version >= '3.9'", "ref": "2fd18ef051b987925a36337ab1d61aa674353b44" }, "msgpack": { "hashes": [ - "sha256:00ce5f827d4f26fc094043e6f08b6069c1b148efa2631c47615ae14fb6cafc89", - "sha256:04450e4b5e1e662e7c86b6aafb7c230af9334fd0becf5e6b80459a507884241c", - "sha256:099c3d8a027367e1a6fc55d15336f04ff65c60c4f737b5739f7db4525c65fe9e", - "sha256:102cfb54eaefa73e8ca1e784b9352c623524185c98e057e519545131a56fb0af", - "sha256:14db7e1b7a7ed362b2f94897bf2486c899c8bb50f6e34b2db92fe534cdab306f", - "sha256:159cfec18a6e125dd4723e2b1de6f202b34b87c850fb9d509acfd054c01135e9", - "sha256:1dc67b40fe81217b308ab12651adba05e7300b3a2ccf84d6b35a878e308dd8d4", - "sha256:1f0e36a5fa7a182cde391a128a64f437657d2b9371dfa42eda3436245adccbf5", - "sha256:229ccb6713c8b941eaa5cf13dc7478eba117f21513b5893c35e44483e2f0c9c8", - "sha256:25d3746da40f3c8c59c3b1d001e49fd2aa17904438f980d9a391370366df001e", - "sha256:32c0aff31f33033f4961abc01f78497e5e07bac02a508632aef394b384d27428", - "sha256:33bbf47ea5a6ff20c23426106e81863cdbb5402de1825493026ce615039cc99d", - "sha256:35ad5aed9b52217d4cea739d0ea3a492a18dd86fecb4b132668a69f27fb0363b", - "sha256:3910211b0ab20be3a38e0bb944ed45bd4265d8d9f11a3d1674b95b298e08dd5c", - "sha256:3b5658b1f9e486a2eec4c0c688f213a90085b9cf2fec76ef08f98fdf6c62f4b9", - "sha256:40b801b768f5a765e33c68f30665d3c6ee1c8623a2d2bb78e6e59f2db4e4ceb7", - "sha256:47275ff73005a3e5e146e50baa2378e1730cba6e292f0222bc496a8e4c4adfc8", - "sha256:55bb4a1bf94e39447bc08238a2fb8a767460388a8192f67c103442eb36920887", - "sha256:5b08676a17e3f791daad34d5fcb18479e9c85e7200d5a17cbe8de798643a7e37", - "sha256:5b16344032a27b2ccfd341f89dadf3e4ef6407d91e4b93563c14644a8abb3ad7", - "sha256:5c5e05e4f5756758c58a8088aa10dc70d851c89f842b611fdccfc0581c1846bc", - "sha256:5cd67674db3c73026e0a2c729b909780e88bd9cbc8184256f9567640a5d299a8", - "sha256:5e7fae9ca93258a956551708cf60dc6c8145574e32ce8c8c4d894e63bcb04341", - "sha256:61213482b5a387ead9e250e9e3cb290292feca39dc83b41c3b1b7b8ffc8d8ecb", - "sha256:619a63753ba9e792fe3c6c0fc2b9ee2cfbd92153dd91bee029a89a71eb2942cd", - "sha256:652e4b7497825b0af6259e2c54700e6dc33d2fc4ed92b8839435090d4c9cc911", - "sha256:68569509dd015fcdd1e6b2b3ccc8c51fd27d9a97f461ccc909270e220ee09685", - "sha256:6a01a072b2219b65a6ff74df208f20b2cac9401c60adb676ee34e53b4c651077", - "sha256:70843788c85ca385846a2d2f836efebe7bb2687ca0734648bf5c9dc6c55602d2", - "sha256:76820f2ece3b0a7c948bbb6a599020e29574626d23a649476def023cbb026787", - "sha256:7a006c300e82402c0c8f1ded11352a3ba2a61b87e7abb3054c845af2ca8d553c", - "sha256:7baf16fd8908a025c4a8d7b699103e72d41f967e2aee5a2065432bcdbd9fd06e", - "sha256:7ecf431786019a7bfedc28281531d706627f603e3691d64eccdbce3ecd353823", - "sha256:885de1ed5ea01c1bfe0a34c901152a264c3c1f8f1d382042b92ea354bd14bb0e", - 
"sha256:88cdb1da7fdb121dbb3116910722f5acab4d6e8bfcacab8fafe27e2e7744dc6a", - "sha256:95ade0bd4cf69e04e8b8f8ec2d197d9c9c4a9b6902e048dc7456bf6d82e12a80", - "sha256:9b88dc97ba86c96b964c3745a445d9a65f76fe21955a953064fe04adb63e9367", - "sha256:9c780d992f5d734432726b92a0c87bf1857c3d85082a8dea29cbf56e44a132b3", - "sha256:9f85200ea102276afdd3749ca94747f057bbb868d1c52921ee2446730b508d0f", - "sha256:a1cf98afa7ad5e7012454ca3fde254499a13f9d92fd50cb46118118a249a1355", - "sha256:a635aecf1047255576dbb0927cbf9a7aa4a68e9d54110cc3c926652d18f144e0", - "sha256:ae97504958d0bc58c1152045c170815d5c4f8af906561ce044b6358b43d0c97e", - "sha256:b06a5095a79384760625b5de3f83f40b3053a385fb893be8a106fbbd84c14980", - "sha256:b5c8dd9a386a66e50bd7fa22b7a49fb8ead2b3574d6bd69eb1caced6caea0803", - "sha256:bae6c561f11b444b258b1b4be2bdd1e1cf93cd1d80766b7e869a79db4543a8a8", - "sha256:bbb4448a05d261fae423d5c0b0974ad899f60825bc77eabad5a0c518e78448c2", - "sha256:bd6af61388be65a8701f5787362cb54adae20007e0cc67ca9221a4b95115583b", - "sha256:bf652839d16de91fe1cfb253e0a88db9a548796939533894e07f45d4bdf90a5f", - "sha256:d6d25b8a5c70e2334ed61a8da4c11cd9b97c6fbd980c406033f06e4463fda006", - "sha256:da057d3652e698b00746e47f06dbb513314f847421e857e32e1dc61c46f6c052", - "sha256:e0ed35d6d6122d0baa9a1b59ebca4ee302139f4cfb57dab85e4c73ab793ae7ed", - "sha256:e36560d001d4ba469d469b02037f2dd404421fd72277d9474efe9f03f83fced5", - "sha256:f4321692e7f299277e55f322329b2c972d93bb612d85f3fda8741bec5c6285ce", - "sha256:f75114c05ec56566da6b55122791cf5bb53d5aada96a98c016d6231e03132f76", - "sha256:fb4571efe86545b772a4630fee578c213c91cbcfd20347806e47fd4e782a18fe", - "sha256:fc97aa4b4fb928ff4d3b74da7c30b360d0cb3ede49a5a6e1fd9705f49aea1deb" + "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862", + "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d", + "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3", + "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672", + "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0", + "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9", + "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee", + "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46", + "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524", + "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819", + "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc", + "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc", + "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1", + "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82", + "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81", + "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6", + "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d", + "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2", + "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c", + "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87", + "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84", + "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e", + "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95", + "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f", + 
"sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b", + "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93", + "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf", + "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61", + "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c", + "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8", + "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d", + "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c", + "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4", + "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba", + "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415", + "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee", + "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d", + "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9", + "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075", + "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f", + "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7", + "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681", + "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329", + "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1", + "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf", + "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c", + "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5", + "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b", + "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5", + "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e", + "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b", + "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad", + "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd", + "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7", + "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002", + "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc" ], "markers": "python_version >= '3.8'", - "version": "==1.0.6" + "version": "==1.0.7" }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], "markers": "python_version >= '3.7'", - "version": "==23.1" + "version": "==23.2" }, "passlib": { "hashes": [ @@ -698,6 +687,62 @@ "markers": "python_version >= '3'", "version": "==0.17.16" }, + "ruamel.yaml.clib": { + "hashes": [ + "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d", + "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", + "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", + "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", + 
"sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", + "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", + "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", + "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", + "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", + "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", + "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", + "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", + "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", + "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", + "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", + "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", + "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", + "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", + "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", + "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", + "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", + "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", + "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", + "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", + "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", + "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", + "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", + "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", + "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", + "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", + "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", + "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb", + "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942", + "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", + "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", + "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", + "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", + "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", + "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", + "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", + "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1", + "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2", + "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875", + "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412" + ], + "markers": "python_version < '3.10' and platform_python_implementation == 'CPython'", 
+ "version": "==0.2.8" + }, "sortedcontainers": { "hashes": [ "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", @@ -746,11 +791,12 @@ }, "werkzeug": { "hashes": [ - "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", - "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" + "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", + "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" ], + "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==2.3.7" + "version": "==3.0.1" }, "wsproto": { "hashes": [ @@ -906,11 +952,11 @@ }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], "markers": "python_version >= '3.7'", - "version": "==23.1" + "version": "==23.2" }, "pathspec": { "hashes": [ @@ -922,19 +968,19 @@ }, "platformdirs": { "hashes": [ - "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d", - "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d" + "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", + "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" ], "markers": "python_version >= '3.7'", - "version": "==3.10.0" + "version": "==3.11.0" }, "pycodestyle": { "hashes": [ - "sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0", - "sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8" + "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f", + "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67" ], "markers": "python_version >= '3.8'", - "version": "==2.11.0" + "version": "==2.11.1" }, "pyflakes": { "hashes": [ diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 1f22ff034..e938e3904 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -10,8 +10,13 @@ on: required: false default: false type: boolean + push: + branches: + - "main" + - "release-*" pull_request: types: [opened, reopened,synchronize] + merge_group: jobs: # Since GHA does not interpolate env varibles in matrix context, we need to # define them in a separate job and use them in other jobs. @@ -27,9 +32,9 @@ jobs: style_checker_image_name: "citus/stylechecker" style_checker_tools_version: "0.8.18" image_suffix: "-v9d71045" - pg14_version: "14.9" - pg15_version: "15.4" - pg16_version: "16.0" + pg14_version: '{ "major": "14", "full": "14.9" }' + pg15_version: '{ "major": "15", "full": "15.4" }' + pg16_version: '{ "major": "16", "full": "16.0" }' upgrade_pg_versions: "14.9-15.4-16.0" steps: # Since GHA jobs needs at least one step we use a noop step here. 
@@ -93,7 +98,7 @@ jobs: run: ci/check_migration_files.sh build: needs: params - name: Build for PG ${{ matrix.pg_version}} + name: Build for PG${{ fromJson(matrix.pg_version).major }} strategy: fail-fast: false matrix: @@ -107,7 +112,7 @@ - ${{ needs.params.outputs.pg16_version }} runs-on: ubuntu-20.04 container: - image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ matrix.image_suffix }}" + image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}" options: --user root steps: - uses: actions/checkout@v3.5.0 @@ -124,7 +129,7 @@ ./build-${{ env.PG_MAJOR }}/* ./install-${{ env.PG_MAJOR }}.tar test-citus: - name: PG${{ matrix.pg_version }} - ${{ matrix.make }} + name: PG${{ fromJson(matrix.pg_version).major }} - ${{ matrix.make }} strategy: fail-fast: false matrix: @@ -211,7 +216,7 @@ image_name: ${{ needs.params.outputs.fail_test_image_name }} runs-on: ubuntu-20.04 container: - image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}" + image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}" options: --user root --dns=8.8.8.8 # Because GitHub creates a default network for each job, we need to use # --dns= to have similar DNS settings as our other CI systems or local @@ -228,17 +233,17 @@ - uses: "./.github/actions/save_logs_and_results" if: always() with: - folder: ${{ matrix.pg_version }}_${{ matrix.make }} + folder: ${{ fromJson(matrix.pg_version).major }}_${{ matrix.make }} - uses: "./.github/actions/upload_coverage" if: always() with: flags: ${{ env.PG_MAJOR }}_${{ matrix.suite }}_${{ matrix.make }} codecov_token: ${{ secrets.CODECOV_TOKEN }} test-arbitrary-configs: - name: PG${{ matrix.pg_version }} - check-arbitrary-configs-${{ matrix.parallel }} + name: PG${{ fromJson(matrix.pg_version).major }} - check-arbitrary-configs-${{ matrix.parallel }} runs-on: ["self-hosted", "1ES.Pool=1es-gha-citusdata-pool"] container: - image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}" + image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}" options: --user root needs: - params @@ -333,10 +338,10 @@ flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade codecov_token: ${{ secrets.CODECOV_TOKEN }} test-citus-upgrade: - name: PG${{ needs.params.outputs.pg14_version }} - check-citus-upgrade + name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade runs-on: ubuntu-20.04 container: - image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ needs.params.outputs.pg14_version }}${{ needs.params.outputs.image_suffix }}" + image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}" options: --user root needs: - params @@ -383,7 +388,7 @@ CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }} runs-on: ubuntu-20.04 container: - image: ${{ needs.params.outputs.test_image_name }}:${{ needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }} + image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }} needs: - params - test-citus @@ -478,7 +483,7 @@ name: Test flakyness runs-on: ubuntu-20.04 container: - image: ${{ needs.params.outputs.fail_test_image_name }}:${{
needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }} + image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }} options: --user root env: runs: 8 @@ -492,7 +497,6 @@ jobs: matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }} steps: - uses: actions/checkout@v3.5.0 - - uses: actions/download-artifact@v3.0.1 - uses: "./.github/actions/setup_extension" - name: Run minimal tests run: |- @@ -501,7 +505,7 @@ jobs: for test in "${tests_array[@]}" do test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/") - gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line + gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-whole-schedule-line done shell: bash - uses: "./.github/actions/save_logs_and_results" diff --git a/.github/workflows/flaky_test_debugging.yml b/.github/workflows/flaky_test_debugging.yml index a666c1cd5..a744edc3b 100644 --- a/.github/workflows/flaky_test_debugging.yml +++ b/.github/workflows/flaky_test_debugging.yml @@ -71,7 +71,7 @@ jobs: - uses: "./.github/actions/setup_extension" - name: Run minimal tests run: |- - gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line + gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-whole-schedule-line shell: bash - uses: "./.github/actions/save_logs_and_results" if: always() diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml index 0fb4b7092..51bd82503 100644 --- a/.github/workflows/packaging-test-pipelines.yml +++ b/.github/workflows/packaging-test-pipelines.yml @@ -3,6 +3,7 @@ name: Build tests in packaging images on: pull_request: types: [opened, reopened,synchronize] + merge_group: workflow_dispatch: @@ -24,9 +25,11 @@ jobs: - name: Get Postgres Versions id: get-postgres-versions run: | - # Postgres versions are stored in .github/workflows/build_and_test.yml file in "pg[pg-version]_version" - # format. Below command extracts the versions and get the unique values. - pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE 'pg[0-9]+_version: "[0-9.]+"' | sed -E 's/pg([0-9]+)_version: "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',') + set -euxo pipefail + # Postgres versions are stored in .github/workflows/build_and_test.yml + # file in json strings with major and full keys. + # Below command extracts the versions and get the unique values. 
+ pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' | sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',') pg_versions_array="[ ${pg_versions} ]" echo "Supported PG Versions: ${pg_versions_array}" # Below line is needed to set the output variable to be used in the next job diff --git a/CHANGELOG.md b/CHANGELOG.md index 02fc91d04..686e78dd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +### citus v12.1.1 (November 9, 2023) ### + +* Fixes leaking of memory and memory contexts in Citus foreign key cache + (#7219) + +* Makes sure to disallow creating a replicated distributed table concurrently + (#7236) + ### citus v12.1.0 (September 12, 2023) ### * Adds support for PostgreSQL 16.0 (#7173) diff --git a/ci/check_all_ci_scripts_are_run.sh b/ci/check_all_ci_scripts_are_run.sh index 0b7abb3e3..12516f793 100755 --- a/ci/check_all_ci_scripts_are_run.sh +++ b/ci/check_all_ci_scripts_are_run.sh @@ -14,8 +14,8 @@ ci_scripts=$( grep -v -E '^(ci_helpers.sh|fix_style.sh)$' ) for script in $ci_scripts; do - if ! grep "\\bci/$script\\b" .circleci/config.yml > /dev/null; then - echo "ERROR: CI script with name \"$script\" is not actually used in .circleci/config.yml" + if ! grep "\\bci/$script\\b" -r .github > /dev/null; then + echo "ERROR: CI script with name \"$script\" is not actually used in .github folder" exit 1 fi if ! grep "^## \`$script\`\$" ci/README.md > /dev/null; then diff --git a/ci/check_enterprise_merge.sh b/ci/check_enterprise_merge.sh deleted file mode 100755 index d29ffcad8..000000000 --- a/ci/check_enterprise_merge.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash - -# Testing this script locally requires you to set the following environment -# variables: -# CIRCLE_BRANCH, GIT_USERNAME and GIT_TOKEN - -# fail if trying to reference a variable that is not set. -set -u -# exit immediately if a command fails -set -e -# Fail on pipe failures -set -o pipefail - -PR_BRANCH="${CIRCLE_BRANCH}" -ENTERPRISE_REMOTE="https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/citusdata/citus-enterprise" - -# shellcheck disable=SC1091 -source ci/ci_helpers.sh - -# List executed commands. This is done so debugging this script is easier when -# it fails. 
It's explicitly done after git remote add so username and password -# are not shown in CI output (even though it's also filtered out by CircleCI) -set -x - -check_compile () { - echo "INFO: checking if merged code can be compiled" - ./configure --without-libcurl - make -j10 -} - -# Clone current git repo (which should be community) to a temporary working -# directory and go there -GIT_DIR_ROOT="$(git rev-parse --show-toplevel)" -TMP_GIT_DIR="$(mktemp --directory -t citus-merge-check.XXXXXXXXX)" -git clone "$GIT_DIR_ROOT" "$TMP_GIT_DIR" -cd "$TMP_GIT_DIR" - -# Fails in CI without this -git config user.email "citus-bot@microsoft.com" -git config user.name "citus bot" - -# Disable "set -x" temporarily, because $ENTERPRISE_REMOTE contains passwords -{ set +x ; } 2> /dev/null -git remote add enterprise "$ENTERPRISE_REMOTE" -set -x - -git remote set-url --push enterprise no-pushing - -# Fetch enterprise-master -git fetch enterprise enterprise-master - - -git checkout "enterprise/enterprise-master" - -if git merge --no-commit "origin/$PR_BRANCH"; then - echo "INFO: community PR branch could be merged into enterprise-master" - # check that we can compile after the merge - if check_compile; then - exit 0 - fi - - echo "WARN: Failed to compile after community PR branch was merged into enterprise" -fi - -# undo partial merge -git merge --abort - -# If we have a conflict on enterprise merge on the master branch, we have a problem. -# Provide an error message to indicate that enterprise merge is needed to fix this check. -if [[ $PR_BRANCH = master ]]; then - echo "ERROR: Master branch has merge conflicts with enterprise-master." - echo "Try re-running this CI job after merging your changes into enterprise-master." - exit 1 -fi - -if ! git fetch enterprise "$PR_BRANCH" ; then - echo "ERROR: enterprise/$PR_BRANCH was not found and community PR branch could not be merged into enterprise-master" - exit 1 -fi - -# Show the top commit of the enterprise PR branch to make debugging easier -git log -n 1 "enterprise/$PR_BRANCH" - -# Check that this branch contains the top commit of the current community PR -# branch. If it does not it means it's not up to date with the current PR, so -# the enterprise branch should be updated. -if ! git merge-base --is-ancestor "origin/$PR_BRANCH" "enterprise/$PR_BRANCH" ; then - echo "ERROR: enterprise/$PR_BRANCH is not up to date with community PR branch" - exit 1 -fi - -# Now check if we can merge the enterprise PR into enterprise-master without -# issues. 
-git merge --no-commit "enterprise/$PR_BRANCH"
-# check that we can compile after the merge
-check_compile
diff --git a/src/backend/columnar/columnar_compression.c b/src/backend/columnar/columnar_compression.c
index 98a175b06..50cdfb01b 100644
--- a/src/backend/columnar/columnar_compression.c
+++ b/src/backend/columnar/columnar_compression.c
@@ -18,7 +18,7 @@
 #include "lib/stringinfo.h"
 #include "columnar/columnar_compression.h"
-#include "distributed/pg_version_constants.h"
+#include "pg_version_constants.h"
 #if HAVE_CITUS_LIBLZ4
 #include
diff --git a/src/backend/columnar/columnar_debug.c b/src/backend/columnar/columnar_debug.c
index cbb0d554f..c60919513 100644
--- a/src/backend/columnar/columnar_debug.c
+++ b/src/backend/columnar/columnar_debug.c
@@ -15,7 +15,7 @@
 #include "access/table.h"
 #include "catalog/pg_am.h"
 #include "catalog/pg_type.h"
-#include "distributed/pg_version_constants.h"
+#include "pg_version_constants.h"
 #include "miscadmin.h"
 #include "storage/fd.h"
 #include "storage/smgr.h"
diff --git a/src/backend/distributed/README.md b/src/backend/distributed/README.md
index 7c4f43add..225b1f962 100644
--- a/src/backend/distributed/README.md
+++ b/src/backend/distributed/README.md
@@ -1723,11 +1723,11 @@ Merge command the same principles as INSERT .. SELECT processing. However, due t
 # DDL

-DDL commands are primarily handled via the ProcessUtility hook, which gets the parse tree of the DDL command. For supported DDL commands, we always follow the same sequence of steps:
+DDL commands are primarily handled via the citus_ProcessUtility hook, which gets the parse tree of the DDL command. For supported DDL commands, we always follow the same sequence of steps:

 1. Qualify the table names in the parse tree (simplifies deparsing, avoids sensitivity to search_path changes)
 2. Pre-process logic
-3. Call original ProcessUtility to execute the command on the local shell table
+3. Call the previous ProcessUtility implementation to execute the command on the local shell table
 4. Post-process logic
 5. Execute command on all other nodes
 6. Execute command on shards (in case of table DDL)
@@ -1749,6 +1749,66 @@ The reason for handling dependencies and deparsing in post-process step is that

 Not all table DDL is currently deparsed. In that case, the original command sent by the client is used. That is a shortcoming in our DDL logic that causes user-facing issues and should be addressed. We do not directly construct a separate DDL command for each shard. Instead, we call the `worker_apply_shard_ddl_command(shardid bigint, ddl_command text)` function which parses the DDL command, replaces the table names with shard names in the parse tree according to the shard ID, and then executes the command. That also has some shortcomings, because we cannot support more complex DDL commands in this manner (e.g. adding multiple foreign keys). Ideally, all DDL would be deparsed, and for table DDL the deparsed query string would have shard names, similar to regular queries.

+`markDistributed` indicates whether we add a record to `pg_dist_object` to mark the object as "distributed".
+
+## Defining a new DDL command
+
+All commands that are propagated by Citus should be defined in a DistributeObjectOps struct. Below is a sample DistributeObjectOps for the ALTER DATABASE command, defined in the [distribute_object_ops.c](commands/distribute_object_ops.c) file.
+
+```c
+static DistributeObjectOps Database_Alter = {
+    .deparse = DeparseAlterDatabaseStmt,
+    .qualify = NULL,
+    .preprocess = PreprocessAlterDatabaseStmt,
+    .postprocess = NULL,
+    .objectType = OBJECT_DATABASE,
+    .operationType = DIST_OPS_ALTER,
+    .address = NULL,
+    .markDistributed = false,
+};
+```
+
+Each field in the struct is documented in the comments within `DistributeObjectOps`. When defining a new DDL command, follow these guidelines:
+
+- **Returning tasks from `preprocess` and `postprocess`**: Ensure that either `preprocess` or `postprocess` returns a list of "DDLJob"s. If both functions return non-empty lists, an assertion failure will occur.
+
+- **Generic `preprocess` and `postprocess` methods**: The generic methods, `PreprocessAlterDistributedObjectStmt` and `PostprocessAlterDistributedObjectStmt`, serve as shared pre- and post-process methods for various statements on distributed objects.
+
+  - The `PreprocessAlterDistributedObjectStmt` method:
+    - Performs a qualification operation.
+    - Deparses the statement and generates a task list.
+
+  - The `PostprocessAlterDistributedObjectStmt` method:
+    - Invokes the `EnsureAllObjectDependenciesExistOnAllNodes` function to propagate missing dependencies, both on the coordinator and the workers.
+
+  - Before defining new `preprocess` or `postprocess` methods, assess whether these generic methods can be employed in your specific case.
+
+- **`deparse`**: When propagating the command to worker nodes, make sure to define `deparse`, since it generates the query string that is sent to each worker node.
+
+- **`markDistributed`**: Set this flag to true if you want to add a record to the `pg_dist_object` table. This is particularly important for `CREATE` statements that introduce a new object to the system.
+
+- **`address`**: If `markDistributed` is set to true, you must define `address`; failing to do so results in a runtime error. The `address` is required to identify the fields that will be stored in the `pg_dist_object` table.
+
+- **`markDistributed` usage in `DROP` statements**: Note that `markDistributed` does not apply to `DROP` statements. For `DROP` statements, you instead need to call `UnmarkObjectDistributed()` for the object in either `preprocess` or `postprocess`. Otherwise, stale records in the `pg_dist_object` table will cause errors in UDF calls such as `citus_add_node()`, which will try to copy the non-existent database object.
+
+- **`qualify`**: The `qualify` function qualifies the objects in the parse tree with their schemas. It is employed to prevent sensitivity to `search_path` changes on worker nodes. It is not mandatory to define this function for all DDL commands; it is only required for commands on objects that are bound to schemas, such as tables, types, and functions.
+
+After defining the `DistributeObjectOps` structure, it should be registered in the `GetDistributeObjectOps()` function as shown below:
+
+```c
+/* example: dispatch on the statement's node tag */
+const DistributeObjectOps *
+GetDistributeObjectOps(Node *node)
+{
+    switch (nodeTag(node))
+    {
+        case T_AlterDatabaseStmt:
+        {
+            return &Database_Alter;
+        }
+...
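+
+        /*
+         * Illustrative sketch only (not part of the original sample): the
+         * remaining cases are elided above. Unhandled statement types fall
+         * through to a default that returns an ops struct whose callbacks
+         * are all NULL, so the statement is simply not propagated. The name
+         * NoDistributeOps is an assumption here.
+         */
+        default:
+        {
+            return &NoDistributeOps;
+        }
+    }
+}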
+``` ## Object & dependency propagation diff --git a/src/backend/distributed/commands/call.c b/src/backend/distributed/commands/call.c index b2f0bfca1..12a1d93b8 100644 --- a/src/backend/distributed/commands/call.c +++ b/src/backend/distributed/commands/call.c @@ -13,7 +13,7 @@ #include "postgres.h" #include "funcapi.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_proc.h" #include "commands/defrem.h" diff --git a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c index 1102a3a51..9b22fb161 100644 --- a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c +++ b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/xact.h" #include "catalog/pg_constraint.h" diff --git a/src/backend/distributed/commands/citus_global_signal.c b/src/backend/distributed/commands/citus_global_signal.c index 8183d6673..b1f4cf187 100644 --- a/src/backend/distributed/commands/citus_global_signal.c +++ b/src/backend/distributed/commands/citus_global_signal.c @@ -11,7 +11,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/backend_data.h" #include "distributed/metadata_cache.h" diff --git a/src/backend/distributed/commands/cluster.c b/src/backend/distributed/commands/cluster.c index 92fcb3ec6..cdae6fc08 100644 --- a/src/backend/distributed/commands/cluster.c +++ b/src/backend/distributed/commands/cluster.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "commands/defrem.h" diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index 023197e15..521ce4b3d 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -27,7 +27,7 @@ #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" #include "distributed/worker_create_or_replace.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/worker_manager.h" #include "parser/parse_type.h" #include "utils/builtins.h" diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c index 9a87df9f1..957e26161 100644 --- a/src/backend/distributed/commands/common.c +++ b/src/backend/distributed/commands/common.c @@ -14,8 +14,10 @@ #include "postgres.h" #include "catalog/objectaddress.h" +#include "catalog/pg_database.h" #include "catalog/pg_ts_config.h" #include "catalog/pg_ts_dict.h" +#include "commands/dbcommands.h" #include "nodes/parsenodes.h" #include "tcop/utility.h" @@ -28,8 +30,6 @@ #include "distributed/metadata/distobject.h" #include "distributed/multi_executor.h" #include "distributed/worker_transaction.h" -#include "catalog/pg_database.h" -#include "commands/dbcommands.h" /* diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 1e89c6b93..768e20b73 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -11,7 +11,7 @@ #include "postgres.h" #include 
"miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/commands/utility_hook.h" #include "access/genam.h" diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 364b94e12..ba58a6d6a 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -10,52 +10,59 @@ */ #include "postgres.h" +#include "miscadmin.h" +#include "access/heapam.h" #include "access/htup_details.h" #include "access/xact.h" #include "catalog/objectaddress.h" +#include "catalog/pg_collation.h" #include "catalog/pg_database.h" +#include "catalog/pg_database_d.h" +#include "catalog/pg_tablespace.h" #include "commands/dbcommands.h" -#include "miscadmin.h" #include "nodes/parsenodes.h" -#include "utils/syscache.h" #include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/relcache.h" +#include "utils/syscache.h" +#include "distributed/adaptive_executor.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" +#include "distributed/deparse_shard_query.h" #include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" #include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" -#include "distributed/worker_transaction.h" -#include "distributed/deparser.h" #include "distributed/worker_protocol.h" -#include "distributed/metadata/distobject.h" -#include "distributed/deparse_shard_query.h" -#include "distributed/listutils.h" -#include "distributed/adaptive_executor.h" -#include "access/htup_details.h" -#include "catalog/pg_tablespace.h" -#include "access/heapam.h" -#include "utils/relcache.h" -#include "utils/rel.h" -#include "utils/lsyscache.h" -#include "catalog/pg_collation.h" -#include "utils/relcache.h" -#include "catalog/pg_database_d.h" +#include "distributed/worker_transaction.h" +/* + * DatabaseCollationInfo is used to store collation related information of a database + */ +typedef struct DatabaseCollationInfo +{ + char *collation; + char *ctype; + #if PG_VERSION_NUM >= PG_VERSION_15 + char *icu_locale; + char *collversion; + #endif +} DatabaseCollationInfo; + static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid); - - -PG_FUNCTION_INFO_V1(citus_internal_database_command); static Oid get_database_owner(Oid db_oid); List * PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); /* controlled via GUC */ -bool EnableCreateDatabasePropagation = true; +bool EnableCreateDatabasePropagation = false; bool EnableAlterDatabaseOwner = true; /* @@ -306,7 +313,6 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, return NIL; } - AlterDatabaseSetStmt *stmt = castNode(AlterDatabaseSetStmt, node); EnsureCoordinator(); @@ -321,6 +327,13 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, } +/* + * PostprocessAlterDatabaseStmt is executed before the statement is applied to the local + * postgres instance. + * + * In this stage, we can perform validations and prepare the commands that need to + * be run on all workers to grant. 
+ */
 List *
 PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
 							 ProcessUtilityContext processUtilityContext)
@@ -361,82 +374,21 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString)
 								(void *) createDatabaseCommand,
 								ENABLE_DDL_PROPAGATION);

-	return NontransactionalNodeDDLTask(NON_COORDINATOR_NODES, commands);
+	return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands);
 }

 /*
- * citus_internal_database_command is an internal UDF to
- * create/drop a database in an idempotent maner without
- * transaction block restrictions.
+ * PreprocessDropDatabaseStmt is executed before the statement is applied to the
+ * local postgres instance. In this stage we can prepare the commands that need
+ * to be run on all workers to drop the database. Since the DROP DATABASE
+ * statement gives an error in a transaction context, we need to use
+ * NontransactionalNodeDDLTaskList to send the DROP DATABASE statement to the
+ * workers.
  */
-Datum
-citus_internal_database_command(PG_FUNCTION_ARGS)
-{
-	int saveNestLevel = NewGUCNestLevel();
-	text *commandText = PG_GETARG_TEXT_P(0);
-	char *command = text_to_cstring(commandText);
-	Node *parseTree = ParseTreeNode(command);
-
-	set_config_option("citus.enable_ddl_propagation", "off",
-					  (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
-					  GUC_ACTION_LOCAL, true, 0, false);
-
-	set_config_option("citus.enable_create_database_propagation", "off",
-					  (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
-					  GUC_ACTION_LOCAL, true, 0, false);
-
-	/*
-	 * createdb() / DropDatabase() uses ParseState to report the error position for the
-	 * input command and the position is reported to be 0 when it's provided as NULL.
-	 * We're okay with that because we don't expect this UDF to be called with an incorrect
-	 * DDL command.
- * - */ - ParseState *pstate = NULL; - - if (IsA(parseTree, CreatedbStmt)) - { - CreatedbStmt *stmt = castNode(CreatedbStmt, parseTree); - - bool missingOk = true; - Oid databaseOid = get_database_oid(stmt->dbname, missingOk); - - if (!OidIsValid(databaseOid)) - { - createdb(pstate, (CreatedbStmt *) parseTree); - } - } - else if (IsA(parseTree, DropdbStmt)) - { - DropdbStmt *stmt = castNode(DropdbStmt, parseTree); - - bool missingOk = false; - Oid databaseOid = get_database_oid(stmt->dbname, missingOk); - - - if (OidIsValid(databaseOid)) - { - DropDatabase(pstate, (DropdbStmt *) parseTree); - } - } - else - { - ereport(ERROR, (errmsg("unsupported command type %d", nodeTag(parseTree)))); - } - - /* Below command rollbacks flags to the state before this session*/ - AtEOXact_GUC(true, saveNestLevel); - - PG_RETURN_VOID(); -} - - List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { - bool isPostProcess = false; if (!EnableCreateDatabasePropagation || !ShouldPropagate()) { return NIL; @@ -446,41 +398,50 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, DropdbStmt *stmt = (DropdbStmt *) node; + bool isPostProcess = false; List *addresses = GetObjectAddressListFromParseTree(node, stmt->missing_ok, isPostProcess); - if (list_length(addresses) == 0) + if (list_length(addresses) != 1) { - return NIL; + ereport(ERROR, (errmsg("unexpected number of objects found when " + "executing DROP DATABASE command"))); } ObjectAddress *address = (ObjectAddress *) linitial(addresses); - if (address->objectId == InvalidOid || !IsObjectDistributed(address)) + if (address->objectId == InvalidOid || !IsAnyObjectDistributed(list_make1(address))) { return NIL; } char *dropDatabaseCommand = DeparseTreeNode(node); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) dropDatabaseCommand, ENABLE_DDL_PROPAGATION); - return NontransactionalNodeDDLTask(NON_COORDINATOR_NODES, commands); + return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands); } +/* + * GetDatabaseAddressFromDatabaseName gets the database name and returns the ObjectAddress + * of the database. + */ static ObjectAddress * GetDatabaseAddressFromDatabaseName(char *databaseName, bool missingOk) { Oid databaseOid = get_database_oid(databaseName, missingOk); - ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress)); - ObjectAddressSet(*dbAddress, DatabaseRelationId, databaseOid); - return dbAddress; + ObjectAddress *dbObjectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*dbObjectAddress, DatabaseRelationId, databaseOid); + return dbObjectAddress; } +/* + * DropDatabaseStmtObjectAddress gets the ObjectAddress of the database that is the + * object of the DropdbStmt. + */ List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { @@ -491,6 +452,10 @@ DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) } +/* + * CreateDatabaseStmtObjectAddress gets the ObjectAddress of the database that is the + * object of the CreatedbStmt. + */ List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { @@ -501,6 +466,9 @@ CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) } +/* + * GetTablespaceName gets the tablespace oid and returns the tablespace name. 
+ */ static char * GetTablespaceName(Oid tablespaceOid) { @@ -519,19 +487,6 @@ GetTablespaceName(Oid tablespaceOid) } -/* - * DatabaseCollationInfo is used to store collation related information of a database - */ -typedef struct DatabaseCollationInfo -{ - char *collation; - char *ctype; - #if PG_VERSION_NUM >= PG_VERSION_15 - char *icu_locale; - char *collversion; - #endif -} DatabaseCollationInfo; - /* * GetDatabaseCollation gets oid of a database and returns all the collation related information * We need this method since collation related info in Form_pg_database is not accessible @@ -605,6 +560,9 @@ GetDatabaseCollation(Oid db_oid) } +/* + * FreeDatabaseCollationInfo frees the memory allocated for DatabaseCollationInfo + */ static void FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo) { @@ -626,8 +584,13 @@ FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo) #if PG_VERSION_NUM >= PG_VERSION_15 + +/* + * GetLocaleProviderString gets the datlocprovider stored in pg_database + * and returns the string representation of the datlocprovider + */ static char * -get_locale_provider_string(char datlocprovider) +GetLocaleProviderString(char datlocprovider) { switch (datlocprovider) { @@ -656,7 +619,8 @@ get_locale_provider_string(char datlocprovider) /* - * GenerateCreateDatabaseStatementFromPgDatabase is gets the pg_database tuple and returns the CREATE DATABASE statement + * GenerateCreateDatabaseStatementFromPgDatabase gets the pg_database tuple and returns the + * CREATE DATABASE statement that can be used to create given database. */ static char * GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) @@ -666,66 +630,64 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) StringInfoData str; initStringInfo(&str); - appendStringInfo(&str, "CREATE DATABASE %s", quote_identifier(NameStr( - databaseForm-> - datname))); + appendStringInfo(&str, "CREATE DATABASE %s", + quote_identifier(NameStr(databaseForm->datname))); if (databaseForm->datdba != InvalidOid) { - appendStringInfo(&str, " OWNER = %s", GetUserNameFromId(databaseForm->datdba, - false)); + appendStringInfo(&str, " OWNER = %s", + quote_literal_cstr(GetUserNameFromId(databaseForm->datdba,false))); } if (databaseForm->encoding != -1) { - appendStringInfo(&str, " ENCODING = '%s'", pg_encoding_to_char( - databaseForm->encoding)); + appendStringInfo(&str, " ENCODING = %s", + quote_literal_cstr(pg_encoding_to_char(databaseForm->encoding))); } if (collInfo.collation != NULL) { - appendStringInfo(&str, " LC_COLLATE = '%s'", collInfo.collation); + appendStringInfo(&str, " LC_COLLATE = %s", quote_literal_cstr(collInfo.collation)); } if (collInfo.ctype != NULL) { - appendStringInfo(&str, " LC_CTYPE = '%s'", collInfo.ctype); + appendStringInfo(&str, " LC_CTYPE = %s", quote_literal_cstr(collInfo.ctype)); } #if PG_VERSION_NUM >= PG_VERSION_15 if (collInfo.icu_locale != NULL) { - appendStringInfo(&str, " ICU_LOCALE = '%s'", collInfo.icu_locale); + appendStringInfo(&str, " ICU_LOCALE = %s", quote_literal_cstr(collInfo.icu_locale)); } if (databaseForm->datlocprovider != 0) { - appendStringInfo(&str, " LOCALE_PROVIDER = '%s'", get_locale_provider_string( - databaseForm->datlocprovider)); + appendStringInfo(&str, " LOCALE_PROVIDER = %s", + quote_literal_cstr(GetLocaleProviderString(databaseForm->datlocprovider))); } if (collInfo.collversion != NULL) { - appendStringInfo(&str, " COLLATION_VERSION = '%s'", collInfo.collversion); + appendStringInfo(&str, " COLLATION_VERSION = %s", 
quote_literal_cstr(collInfo.collversion));
 	}
 #endif

 	if (databaseForm->dattablespace != InvalidOid)
 	{
-		appendStringInfo(&str, " TABLESPACE = %s", quote_identifier(GetTablespaceName(
-															databaseForm->
-															dattablespace)));
+		appendStringInfo(&str, " TABLESPACE = %s",
+						 quote_identifier(GetTablespaceName(databaseForm->dattablespace)));
 	}

-	appendStringInfo(&str, " ALLOW_CONNECTIONS = '%s'", databaseForm->datallowconn ?
-					 "true" : "false");
+	appendStringInfo(&str, " ALLOW_CONNECTIONS = %s",
+					 quote_literal_cstr(databaseForm->datallowconn ? "true" : "false"));

 	if (databaseForm->datconnlimit >= 0)
 	{
 		appendStringInfo(&str, " CONNECTION LIMIT %d", databaseForm->datconnlimit);
 	}

-	appendStringInfo(&str, " IS_TEMPLATE = '%s'", databaseForm->datistemplate ? "true" :
-					 "false");
+	appendStringInfo(&str, " IS_TEMPLATE = %s",
+					 quote_literal_cstr(databaseForm->datistemplate ? "true" : "false"));

 	FreeDatabaseCollationInfo(collInfo);
@@ -735,19 +697,21 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)

 /*
- * GenerateCreateDatabaseCommandList is gets the pg_database tuples and returns the CREATE DATABASE statement list
- * for all the databases in the cluster.citus_internal_database_command UDF is used to send the CREATE DATABASE
- * statement to the workers since the CREATE DATABASE statement gives error in transaction context.
+ * GenerateCreateDatabaseCommandList gets a list of pg_database tuples and returns
+ * a list of CREATE DATABASE statements for all the databases.
+ *
+ * Commands in the list are wrapped in the citus_internal_database_command() UDF
+ * to avoid the transaction block restrictions that apply to database commands.
  */
 List *
 GenerateCreateDatabaseCommandList(void)
 {
 	List *commands = NIL;
-	HeapTuple tuple;

 	Relation pgDatabaseRel = table_open(DatabaseRelationId, AccessShareLock);
 	TableScanDesc scan = table_beginscan_catalog(pgDatabaseRel, 0, NULL);

+	HeapTuple tuple = NULL;
 	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 	{
 		Form_pg_database databaseForm = (Form_pg_database) GETSTRUCT(tuple);
@@ -759,7 +723,7 @@ GenerateCreateDatabaseCommandList(void)
 		/* Generate the CREATE DATABASE statement */
 		appendStringInfo(outerDbStmt,
-						 "select pg_catalog.citus_internal_database_command( %s)",
+						 "SELECT pg_catalog.citus_internal_database_command(%s)",
 						 quote_literal_cstr(
 							 createStmt));
diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c
index 977efb145..e309ee86c 100644
--- a/src/backend/distributed/commands/dependencies.c
+++ b/src/backend/distributed/commands/dependencies.c
@@ -40,14 +40,14 @@ static char * DropTableIfExistsCommand(Oid relationId);

 /*
  * EnsureDependenciesExistOnAllNodes finds all the dependencies that we support and makes
- * sure these are available on all workers. If not available they will be created on the
- * workers via a separate session that will be committed directly so that the objects are
+ * sure these are available on all nodes. If not available they will be created on the
+ * nodes via a separate session that will be committed directly so that the objects are
  * visible to potentially multiple sessions creating the shards.
  *
  * Note; only the actual objects are created via a separate session, the records to
  * pg_dist_object are created in this session. As a side effect the objects could be
- * created on the workers without a catalog entry. Updates to the objects on the coordinator
- * are not propagated to the workers until the record is visible on the coordinator.
+ * created on the nodes without a catalog entry. Updates to the objects on local node + * are not propagated to the remote nodes until the record is visible on local node. * * This is solved by creating the dependencies in an idempotent manner, either via * postgres native CREATE IF NOT EXISTS, or citus helper functions. @@ -95,7 +95,7 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) * either get it now, or get it in citus_add_node after this transaction finishes and * the pg_dist_object record becomes visible. */ - List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(RowShareLock); + List *remoteNodeList = ActivePrimaryRemoteNodeList(RowShareLock); /* * Lock dependent objects explicitly to make sure same DDL command won't be sent @@ -127,12 +127,12 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) */ if (HasAnyDependencyInPropagatedObjects(target)) { - SendCommandListToWorkersWithMetadata(ddlCommands); + SendCommandListToRemoteNodesWithMetadata(ddlCommands); } else { WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_ptr(workerNode, remoteNodeList) { const char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; @@ -144,8 +144,8 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) } /* - * We do this after creating the objects on the workers, we make sure - * that objects have been created on worker nodes before marking them + * We do this after creating the objects on remote nodes, we make sure + * that objects have been created on remote nodes before marking them * distributed, so MarkObjectDistributed wouldn't fail. */ foreach_ptr(dependency, dependenciesWithCommands) diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 3063a5705..cc4aba18d 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -14,7 +14,7 @@ #include "distributed/commands.h" #include "distributed/deparser.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/version_compat.h" #include "distributed/commands/utility_hook.h" diff --git a/src/backend/distributed/commands/foreign_constraint.c b/src/backend/distributed/commands/foreign_constraint.c index 7c2d50f44..709287c56 100644 --- a/src/backend/distributed/commands/foreign_constraint.c +++ b/src/backend/distributed/commands/foreign_constraint.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/htup_details.h" #include "access/sysattr.h" diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 01911677d..701041673 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -21,7 +21,7 @@ #include "miscadmin.h" #include "funcapi.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" @@ -978,7 +978,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) char *argmodes = NULL; int insertorderbyat = -1; int argsprinted = 0; - int inputargno = 0; HeapTuple proctup = SearchSysCache1(PROCOID, funcOid); if (!HeapTupleIsValid(proctup)) @@ -1058,7 +1057,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) } } - inputargno++; /* this 
is a 1-based counter */ if (argsprinted == insertorderbyat) { appendStringInfoString(&buf, " ORDER BY "); diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 275f253b3..0b5cfb812 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" #include "access/xact.h" @@ -180,6 +180,8 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand, return NIL; } + EnsureCoordinator(); + if (createIndexStatement->idxname == NULL) { /* diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index a684d06cc..a5c7a47f4 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -52,7 +52,7 @@ #include /* for htons */ #include -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/htup_details.h" #include "access/htup.h" diff --git a/src/backend/distributed/commands/publication.c b/src/backend/distributed/commands/publication.c index 581f7f874..f225b0fca 100644 --- a/src/backend/distributed/commands/publication.c +++ b/src/backend/distributed/commands/publication.c @@ -175,7 +175,6 @@ BuildCreatePublicationStmt(Oid publicationId) PUBLICATION_PART_ROOT : PUBLICATION_PART_LEAF); Oid relationId = InvalidOid; - int citusTableCount PG_USED_FOR_ASSERTS_ONLY = 0; /* mainly for consistent ordering in test output */ relationIds = SortList(relationIds, CompareOids); @@ -199,11 +198,6 @@ BuildCreatePublicationStmt(Oid publicationId) createPubStmt->tables = lappend(createPubStmt->tables, rangeVar); #endif - - if (IsCitusTable(relationId)) - { - citusTableCount++; - } } /* WITH (publish_via_partition_root = true) option */ diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 792efd934..f3ac7b4ff 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -12,7 +12,7 @@ #include "pg_version_compat.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/heapam.h" #include "access/htup_details.h" @@ -65,6 +65,7 @@ static DefElem * makeDefElemBool(char *name, bool value); static List * GenerateRoleOptionsList(HeapTuple tuple); static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options); static List * GenerateGrantRoleStmtsOfRole(Oid roleid); +static void EnsureSequentialModeForRoleDDL(void); static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple, TupleDesc DbRoleSettingDescription); @@ -155,7 +156,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString) return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); AlterRoleStmt *stmt = castNode(AlterRoleStmt, node); @@ -184,7 +185,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString) (void *) CreateAlterRoleIfExistsCommand(stmt), ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -230,7 +231,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); @@ -239,7 +240,7 @@ PreprocessAlterRoleSetStmt(Node *node, 
const char *queryString, (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList); + return NodeDDLTaskList(REMOTE_NODES, commandList); } @@ -909,7 +910,8 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); + EnsureSequentialModeForRoleDDL(); LockRelationOid(DistNodeRelationId(), RowShareLock); @@ -944,7 +946,7 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString, commands = lappend(commands, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -1040,7 +1042,8 @@ PreprocessDropRoleStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); + EnsureSequentialModeForRoleDDL(); @@ -1052,7 +1055,7 @@ PreprocessDropRoleStmt(Node *node, const char *queryString, sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -1129,7 +1132,7 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); List *allGranteeRoles = stmt->grantee_roles; @@ -1169,7 +1172,7 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -1180,11 +1183,13 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, List * PostprocessGrantRoleStmt(Node *node, const char *queryString) { - if (!EnableCreateRolePropagation || !IsCoordinator() || !ShouldPropagate()) + if (!EnableCreateRolePropagation || !ShouldPropagate()) { return NIL; } + EnsurePropagationToCoordinator(); + GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); RoleSpec *role = NULL; @@ -1332,7 +1337,7 @@ PreprocessAlterRoleRenameStmt(Node *node, const char *queryString, Assert(stmt->renameType == OBJECT_ROLE); - EnsureCoordinator(); + EnsurePropagationToCoordinator(); char *sql = DeparseTreeNode((Node *) stmt); @@ -1340,7 +1345,7 @@ PreprocessAlterRoleRenameStmt(Node *node, const char *queryString, (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } diff --git a/src/backend/distributed/commands/subscription.c b/src/backend/distributed/commands/subscription.c index 59603b559..52519b680 100644 --- a/src/backend/distributed/commands/subscription.c +++ b/src/backend/distributed/commands/subscription.c @@ -17,7 +17,7 @@ #include "commands/defrem.h" #include "distributed/commands.h" #include "distributed/connection_management.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/version_compat.h" #include "libpq-fe.h" #include "nodes/parsenodes.h" diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 500c6f3f2..e8404d38c 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -9,7 +9,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" #include "access/xact.h" diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c index 
7577dfd31..0ec8287f5 100644
--- a/src/backend/distributed/commands/trigger.c
+++ b/src/backend/distributed/commands/trigger.c
@@ -9,7 +9,7 @@
 *-------------------------------------------------------------------------
 */
 #include "postgres.h"
-#include "distributed/pg_version_constants.h"
+#include "pg_version_constants.h"
 #include "access/genam.h"
 #include "access/table.h"
diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c
index 02e5f0dee..ccb7bf528 100644
--- a/src/backend/distributed/commands/type.c
+++ b/src/backend/distributed/commands/type.c
@@ -43,7 +43,7 @@
 #include "postgres.h"

-#include "distributed/pg_version_constants.h"
+#include "pg_version_constants.h"

 #include "access/genam.h"
 #include "access/htup_details.h"
diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c
index 0d400d139..5d7fa3947 100644
--- a/src/backend/distributed/commands/utility_hook.c
+++ b/src/backend/distributed/commands/utility_hook.c
@@ -25,7 +25,8 @@
 *-------------------------------------------------------------------------
 */
-#include "distributed/pg_version_constants.h"
+
+#include "pg_version_constants.h"

 #include "postgres.h"
 #include "miscadmin.h"
@@ -35,6 +36,7 @@
 #include "access/htup_details.h"
 #include "catalog/catalog.h"
 #include "catalog/dependency.h"
+#include "catalog/pg_database.h"
 #include "citus_version.h"
 #include "commands/dbcommands.h"
 #include "commands/defrem.h"
@@ -62,6 +64,7 @@
 #include "distributed/multi_executor.h"
 #include "distributed/multi_explain.h"
 #include "distributed/multi_physical_planner.h"
+#include "distributed/pg_version_constants.h"
 #include "distributed/reference_table_utils.h"
 #include "distributed/resource_lock.h"
 #include "distributed/string_utils.h"
@@ -80,7 +83,6 @@
 #include "utils/inval.h"
 #include "utils/lsyscache.h"
 #include "utils/syscache.h"
-#include "catalog/pg_database.h"

 bool EnableDDLPropagation = true; /* ddl propagation is enabled */
@@ -579,7 +581,6 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
 		PreprocessLockStatement((LockStmt *) parsetree, context);
 	}
-
 	/*
 	 * We only process ALTER TABLE ... ATTACH PARTITION commands in the function below
 	 * and distribute the partition if necessary.
@@ -710,9 +711,9 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
 	}
 	else if (IsA(parsetree, CreateRoleStmt) && !EnableCreateRolePropagation)
 	{
-		ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to worker"
+		ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to other"
 								" nodes"),
-						 errhint("Connect to worker nodes directly to manually create all"
+						 errhint("Connect to other nodes directly to manually create all"
 								 " necessary users and roles.")));
 	}
@@ -726,12 +727,13 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
 	}

 	/*
-	 * Make sure that dropping the role and database deletes the pg_dist_object entries. There is a
-	 * separate logic for roles and database, since roles and database are not included as dropped objects in the
-	 * drop event trigger. To handle it both on worker and coordinator nodes, it is not
-	 * implemented as a part of process functions but here.
+	 * Make sure that dropping node-wide objects deletes the pg_dist_object
+	 * entries. There is separate logic for node-wide objects (such as roles
+	 * and databases), since they are not included as dropped objects in the
+	 * drop event trigger. To handle it both on worker and coordinator nodes,
+	 * it is not implemented as a part of process functions but here.
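+	 * (For example, DROP DATABASE and DROP ROLE must remove the corresponding
+	 * pg_dist_object entries here, because the drop event trigger does not
+	 * report these objects as dropped.)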
 	 */
-	UnmarkRolesAndDatabaseDistributed(parsetree);
+	UnmarkNodeWideObjectsDistributed(parsetree);

 	pstmt->utilityStmt = parsetree;
@@ -1098,16 +1100,17 @@ IsDropSchemaOrDB(Node *parsetree)
 * each shard placement and COMMIT/ROLLBACK is handled by
 * CoordinatedTransactionCallback function.
 *
- * The function errors out if the node is not the coordinator or if the DDL is on
- * a partitioned table which has replication factor > 1.
- *
+ * The function errors out if the DDL is on a partitioned table which has replication
+ * factor > 1, or if the coordinator is not added into metadata and we're on a
+ * worker node, because we want to make sure that distributed DDL jobs are executed
+ * on the coordinator node too. See EnsurePropagationToCoordinator() for more details.
 */
 void
 ExecuteDistributedDDLJob(DDLJob *ddlJob)
 {
 	bool shouldSyncMetadata = false;

-	EnsureCoordinator();
+	EnsurePropagationToCoordinator();

 	ObjectAddress targetObjectAddress = ddlJob->targetObjectAddress;
@@ -1131,23 +1134,24 @@
 	{
 		if (shouldSyncMetadata)
 		{
-			SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);
+			SendCommandToRemoteNodesWithMetadata(DISABLE_DDL_PROPAGATION);

 			char *currentSearchPath = CurrentSearchPath();

 			/*
-			 * Given that we're relaying the query to the worker nodes directly,
+			 * Given that we're relaying the query to the remote nodes directly,
 			 * we should set the search path exactly the same when necessary.
 			 */
 			if (currentSearchPath != NULL)
 			{
-				SendCommandToWorkersWithMetadata(
+				SendCommandToRemoteNodesWithMetadata(
 					psprintf("SET LOCAL search_path TO %s;", currentSearchPath));
 			}

 			if (ddlJob->metadataSyncCommand != NULL)
 			{
-				SendCommandToWorkersWithMetadata((char *) ddlJob->metadataSyncCommand);
+				SendCommandToRemoteNodesWithMetadata(
+					(char *) ddlJob->metadataSyncCommand);
 			}
 		}
@@ -1226,7 +1230,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 		char *currentSearchPath = CurrentSearchPath();

 		/*
-		 * Given that we're relaying the query to the worker nodes directly,
+		 * Given that we're relaying the query to the remote nodes directly,
 		 * we should set the search path exactly the same when necessary.
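+		 * (For example, if the session has search_path set to
+		 * 'myschema, public', the remote nodes must use the same setting so
+		 * that unqualified names in the relayed query resolve to the same
+		 * objects.)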
*/ if (currentSearchPath != NULL) @@ -1238,7 +1242,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) commandList = lappend(commandList, (char *) ddlJob->metadataSyncCommand); - SendBareCommandListToMetadataWorkers(commandList); + SendBareCommandListToRemoteMetadataNodes(commandList); } } PG_CATCH(); @@ -1265,10 +1269,12 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) { ereport(WARNING, (errmsg( - "Commands that are not transaction-safe may result in partial failure" - ", potentially leading to an inconsistent state.\nIf the problematic command" - " is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the " - "object,\nif applicable, and then reattempt the original command."))); + "Commands that are not transaction-safe may result in " + "partial failure, potentially leading to an inconsistent " + "state.\nIf the problematic command is a CREATE operation, " + "consider using the 'IF EXISTS' syntax to drop the object," + "\nif applicable, and then re-attempt the original command."))); + PG_RE_THROW(); } } @@ -1483,12 +1489,12 @@ DDLTaskList(Oid relationId, const char *commandString) /* - * NontransactionalNodeDDLTask builds a list of tasks to execute a DDL command on a + * NontransactionalNodeDDLTaskList builds a list of tasks to execute a DDL command on a * given target set of nodes with cannotBeExecutedInTransaction is set to make sure - * that list is being executed without a transaction. + * that task list is executed outside a transaction block. */ List * -NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands) +NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands) { List *ddlJobs = NodeDDLTaskList(targets, commands); DDLJob *ddlJob = NULL; diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index 21638ba7f..f1cf3cb31 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "commands/defrem.h" #include "commands/vacuum.h" @@ -184,7 +184,6 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList, CitusVacuumParams vacuumParams) { int relationIndex = 0; - int executedVacuumCount = 0; Oid relationId = InvalidOid; foreach_oid(relationId, relationIdList) @@ -197,7 +196,6 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList, /* local execution is not implemented for VACUUM commands */ bool localExecutionSupported = false; ExecuteUtilityTaskList(taskList, localExecutionSupported); - executedVacuumCount++; } relationIndex++; } diff --git a/src/backend/distributed/connection/locally_reserved_shared_connections.c b/src/backend/distributed/connection/locally_reserved_shared_connections.c index e3f7cb628..0a27ba17c 100644 --- a/src/backend/distributed/connection/locally_reserved_shared_connections.c +++ b/src/backend/distributed/connection/locally_reserved_shared_connections.c @@ -33,7 +33,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" @@ -302,8 +302,8 @@ EnsureConnectionPossibilityForRemotePrimaryNodes(void) * seem to cause any problems as none of the placements that we are * going to access would be on the new node. 
 */
-	List *primaryNodeList = ActivePrimaryRemoteNodeList(NoLock);
-	EnsureConnectionPossibilityForNodeList(primaryNodeList);
+	List *remoteNodeList = ActivePrimaryRemoteNodeList(NoLock);
+	EnsureConnectionPossibilityForNodeList(remoteNodeList);
 }
diff --git a/src/backend/distributed/connection/placement_connection.c b/src/backend/distributed/connection/placement_connection.c
index cc7962e37..3924e5a05 100644
--- a/src/backend/distributed/connection/placement_connection.c
+++ b/src/backend/distributed/connection/placement_connection.c
@@ -11,7 +11,7 @@
 #include "postgres.h"

-#include "distributed/pg_version_constants.h"
+#include "pg_version_constants.h"

 #include "access/hash.h"
 #include "distributed/colocation_utils.h"
diff --git a/src/backend/distributed/connection/shared_connection_stats.c b/src/backend/distributed/connection/shared_connection_stats.c
index fcd396fe4..104caed07 100644
--- a/src/backend/distributed/connection/shared_connection_stats.c
+++ b/src/backend/distributed/connection/shared_connection_stats.c
@@ -13,7 +13,7 @@
 #include "postgres.h"
 #include "pgstat.h"

-#include "distributed/pg_version_constants.h"
+#include "pg_version_constants.h"

 #include "libpq-fe.h"
diff --git a/src/backend/distributed/deparser/citus_deparseutils.c b/src/backend/distributed/deparser/citus_deparseutils.c
index 6492c14f2..0cfd7dd6f 100644
--- a/src/backend/distributed/deparser/citus_deparseutils.c
+++ b/src/backend/distributed/deparser/citus_deparseutils.c
@@ -1,56 +1,66 @@
-
+/*
+ * citus_deparseutils.c
+ * ---------------------
+ *
+ * This file contains common functions used for deparsing PostgreSQL
+ * statements into their SQL representation.
+ *
+ */
 #include "postgres.h"
-
-#include "utils/builtins.h"
+
 #include "commands/defrem.h"
+#include "distributed/deparser.h"
+#include "distributed/pg_version_constants.h"
+#include "utils/builtins.h"
 #include "utils/elog.h"
 #include "utils/rel.h"
 #include "utils/relcache.h"
 #include "utils/syscache.h"
 #include "utils/typcache.h"
-#include "distributed/deparser.h"
-#include "distributed/pg_version_constants.h"

 /**
- * Convert a DefElem option to a SQL statement and append it to the given StringInfo buffer.
+ * DefElemOptionToStatement converts a DefElem option to a SQL statement and
+ * appends it to the given StringInfo buffer.
 *
 * @param buf The StringInfo buffer to append the SQL statement to.
 * @param option The DefElem option to convert to a SQL statement.
- * @param opt_formats The option format specification to use for the conversion.
- * @param num_opt_formats The number of option formats in the opt_formats array.
+ * @param optionFormats The option format specification to use for the conversion.
+ * @param optionFormatsLen The number of option formats in the optionFormats array.
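+ *
+ * For example, given the format entry { "owner", " OWNER %s",
+ * OPTION_FORMAT_STRING } and an owner option with value "regress_user", this
+ * appends " OWNER regress_user" to buf (string values are identifier-quoted
+ * as needed).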
*/ void -optionToStatement(StringInfo buf, DefElem *option, const struct - option_format *opt_formats, int - opt_formats_len) +DefElemOptionToStatement(StringInfo buf, DefElem *option, const + DefElemOptionFormat *optionFormats, int + optionFormatsLen) { const char *name = option->defname; int i; - for (i = 0; i < opt_formats_len; i++) + for (i = 0; i < optionFormatsLen; i++) { - if (strcmp(name, opt_formats[i].name) == 0) + if (strcmp(name, optionFormats[i].name) == 0) { - switch (opt_formats[i].type) + switch (optionFormats[i].type) { case OPTION_FORMAT_STRING: { char *value = defGetString(option); - appendStringInfo(buf, opt_formats[i].format, quote_identifier(value)); + appendStringInfo(buf, optionFormats[i].format, quote_identifier( + value)); break; } case OPTION_FORMAT_INTEGER: { int32 value = defGetInt32(option); - appendStringInfo(buf, opt_formats[i].format, value); + appendStringInfo(buf, optionFormats[i].format, value); break; } case OPTION_FORMAT_BOOLEAN: { bool value = defGetBoolean(option); - appendStringInfo(buf, opt_formats[i].format, value ? "true" : + appendStringInfo(buf, optionFormats[i].format, value ? "true" : "false"); break; } @@ -59,7 +69,7 @@ optionToStatement(StringInfo buf, DefElem *option, const struct case OPTION_FORMAT_OBJECT_ID: { Oid value = defGetObjectId(option); - appendStringInfo(buf, opt_formats[i].format, value); + appendStringInfo(buf, optionFormats[i].format, value); break; } @@ -67,14 +77,14 @@ optionToStatement(StringInfo buf, DefElem *option, const struct case OPTION_FORMAT_LITERAL_CSTR: { char *value = defGetString(option); - appendStringInfo(buf, opt_formats[i].format, quote_literal_cstr( + appendStringInfo(buf, optionFormats[i].format, quote_literal_cstr( value)); break; } default: { - elog(ERROR, "unrecognized option type: %d", opt_formats[i].type); + elog(ERROR, "unrecognized option type: %d", optionFormats[i].type); break; } } diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index 220ea3ec7..1456f2fb5 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -10,7 +10,7 @@ #include "postgres.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c index 986c1bb86..bd617f9aa 100644 --- a/src/backend/distributed/deparser/deparse_database_stmts.c +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -12,24 +12,24 @@ #include "postgres.h" #include "pg_version_compat.h" - #include "catalog/namespace.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" #include "utils/builtins.h" -#include "distributed/deparser.h" -#include "distributed/citus_ruleutils.h" #include "commands/defrem.h" #include "distributed/deparser.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" #include "distributed/log_utils.h" #include "parser/parse_type.h" -#include "distributed/listutils.h" + static void AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt); static void AppendAlterDatabaseStmt(StringInfo buf, AlterDatabaseStmt *stmt); -const struct option_format create_database_option_formats[] = { +const DefElemOptionFormat create_database_option_formats[] = { { "owner", " OWNER %s", OPTION_FORMAT_STRING }, { "template", " TEMPLATE %s", 
OPTION_FORMAT_STRING }, { "encoding", " ENCODING %s", OPTION_FORMAT_LITERAL_CSTR }, @@ -49,12 +49,29 @@ const struct option_format create_database_option_formats[] = { }; -const struct option_format alter_database_option_formats[] = { +const DefElemOptionFormat alter_database_option_formats[] = { { "is_template", " IS_TEMPLATE %s", OPTION_FORMAT_BOOLEAN }, { "allow_connections", " ALLOW_CONNECTIONS %s", OPTION_FORMAT_BOOLEAN }, { "connection_limit", " CONNECTION LIMIT %d", OPTION_FORMAT_INTEGER }, }; + +/* + * DeparseAlterDatabaseOwnerStmt + * Deparse an AlterDatabaseOwnerStmt node + * + * This function is responsible for producing a string representation of an + * AlterDatabaseOwnerStmt node, which represents an ALTER DATABASE statement + * that changes the owner of a database. The output string includes the ALTER + * DATABASE keyword, the name of the database being altered, and the new owner + * of the database. + * + * Parameters: + * - node: a pointer to the AlterDatabaseOwnerStmt node to be deparsed + * + * Returns: + * - a string representation of the ALTER DATABASE statement + */ char * DeparseAlterDatabaseOwnerStmt(Node *node) { @@ -70,6 +87,15 @@ DeparseAlterDatabaseOwnerStmt(Node *node) } +/* + * AppendAlterDatabaseOwnerStmt + * Appends an ALTER DATABASE statement that changes the owner of a database to + * the given StringInfo buffer. + * + * Parameters: + * - buf: The StringInfo buffer to append the statement to. + * - stmt: The AlterOwnerStmt representing the ALTER DATABASE statement to append. + */ static void AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt) { @@ -256,6 +282,34 @@ DeparseAlterDatabaseSetStmt(Node *node) } +/* + * ValidateCreateDatabaseOptions errors out if the given CREATE DATABASE option + * cannot be propagated: the template and strategy options are not stored in the + * catalog, and the lc_ctype, locale and lc_collate options depend on the template + * parameter, so only template1 is accepted as the template. + */ +static void +ValidateCreateDatabaseOptions(DefElem *option) +{ + if (strcmp(option->defname, "strategy") == 0) + { + ereport(ERROR, + errmsg("CREATE DATABASE option \"%s\" is not supported", + option->defname)); + } + + char *optionValue = defGetString(option); + if (strcmp(option->defname, "template") == 0 && strcmp(optionValue, "template1") != 0) + { + ereport(ERROR, errmsg("Only template1 is supported as template parameter for CREATE DATABASE")); + } +} + + +/* + * AppendCreateDatabaseStmt deparses the given CreatedbStmt node into the given + * (empty) StringInfo buffer as a CREATE DATABASE statement.
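+ *
+ * For example (illustrative), a CreatedbStmt parsed from
+ *   CREATE DATABASE db1 OWNER db_owner CONNECTION LIMIT 10
+ * is deparsed back into roughly the same statement, option by option, via
+ * create_database_option_formats above.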
+ */ static void AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt) { @@ -267,27 +321,18 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt) foreach_ptr(option, stmt->options) { - /*If option is template, lc_type, locale or lc_collate, propagation will not be supportted */ - /* since template database is not stored in the catalog */ - if (strcmp(option->defname, "template") == 0 || - strcmp(option->defname, "strategy") == 0 || - strcmp(option->defname, "lc_ctype") == 0 || - strcmp(option->defname, "locale") == 0 || - strcmp(option->defname, "lc_collate") == 0 || - strcmp(option->defname, "icu_locale") == 0 || - strcmp(option->defname, "locale_provider") == 0) - { - ereport(ERROR, - errmsg("CREATE DATABASE option \"%s\" is not supported", - option->defname)); - } + ValidateCreateDatabaseOptions(option); - optionToStatement(buf, option, create_database_option_formats, lengthof( - create_database_option_formats)); + DefElemOptionToStatement(buf, option, create_database_option_formats, + lengthof(create_database_option_formats)); } } +/* + * Converts a CreatedbStmt structure into a SQL command string. + * Used when deparsing a CREATE DATABASE statement. + */ char * DeparseCreateDatabaseStmt(Node *node) { @@ -301,13 +346,16 @@ DeparseCreateDatabaseStmt(Node *node) } +/* + * AppendDropDatabaseStmt deparses the given DropdbStmt node into the given + * (empty) StringInfo buffer as a DROP DATABASE statement. + */ static void AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt) { - char *if_exists_statement = stmt->missing_ok ? "IF EXISTS" : ""; + char *ifExistsStatement = stmt->missing_ok ? "IF EXISTS" : ""; appendStringInfo(buf, "DROP DATABASE %s %s", - if_exists_statement, + ifExistsStatement, quote_identifier(stmt->dbname)); DefElem *option = NULL; @@ -328,6 +376,10 @@ AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt) } +/* + * Converts a DropdbStmt structure into a SQL command string. + * Used when deparsing a DROP DATABASE statement. + */ char * DeparseDropDatabaseStmt(Node *node) { diff --git a/src/backend/distributed/deparser/deparse_statistics_stmts.c b/src/backend/distributed/deparser/deparse_statistics_stmts.c index 4a165ec72..599738dc5 100644 --- a/src/backend/distributed/deparser/deparse_statistics_stmts.c +++ b/src/backend/distributed/deparser/deparse_statistics_stmts.c @@ -12,7 +12,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/namespace.h" #include "distributed/citus_ruleutils.h" diff --git a/src/backend/distributed/deparser/ruleutils_14.c b/src/backend/distributed/deparser/ruleutils_14.c index 6ab124537..01b74eab1 100644 --- a/src/backend/distributed/deparser/ruleutils_14.c +++ b/src/backend/distributed/deparser/ruleutils_14.c @@ -14,7 +14,7 @@ * This needs to be closely in sync with the core code. *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_config.h" diff --git a/src/backend/distributed/deparser/ruleutils_15.c b/src/backend/distributed/deparser/ruleutils_15.c index 755e0f4cd..a84f8b113 100644 --- a/src/backend/distributed/deparser/ruleutils_15.c +++ b/src/backend/distributed/deparser/ruleutils_15.c @@ -14,7 +14,7 @@ * This needs to be closely in sync with the core code.
*------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_config.h" diff --git a/src/backend/distributed/deparser/ruleutils_16.c b/src/backend/distributed/deparser/ruleutils_16.c index 31e8823b1..10373e487 100644 --- a/src/backend/distributed/deparser/ruleutils_16.c +++ b/src/backend/distributed/deparser/ruleutils_16.c @@ -14,7 +14,7 @@ * This needs to be closely in sync with the core code. *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_config.h" diff --git a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c index a2a2ff6cb..3403e27ca 100644 --- a/src/backend/distributed/executor/citus_custom_scan.c +++ b/src/backend/distributed/executor/citus_custom_scan.c @@ -9,7 +9,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" diff --git a/src/backend/distributed/executor/distributed_intermediate_results.c b/src/backend/distributed/executor/distributed_intermediate_results.c index c10303e18..cc351a1fc 100644 --- a/src/backend/distributed/executor/distributed_intermediate_results.c +++ b/src/backend/distributed/executor/distributed_intermediate_results.c @@ -8,7 +8,7 @@ *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include #include diff --git a/src/backend/distributed/executor/local_executor.c b/src/backend/distributed/executor/local_executor.c index 5661403b9..7168fd314 100644 --- a/src/backend/distributed/executor/local_executor.c +++ b/src/backend/distributed/executor/local_executor.c @@ -78,7 +78,7 @@ #include "postgres.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/adaptive_executor.h" #include "distributed/commands/utility_hook.h" @@ -567,7 +567,7 @@ LogLocalCommand(Task *task) * * One slightly different case is modifications to replicated tables * (e.g., reference tables) where a single task ends in two separate tasks - * and the local task is added to localTaskList and the remaning ones to + * and the local task is added to localTaskList and the remaining ones to * the remoteTaskList. 
*/ void diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index 662eaaf97..306698251 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" diff --git a/src/backend/distributed/executor/query_stats.c b/src/backend/distributed/executor/query_stats.c index 1ac70489c..b59777d45 100644 --- a/src/backend/distributed/executor/query_stats.c +++ b/src/backend/distributed/executor/query_stats.c @@ -15,7 +15,7 @@ #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/hash.h" #include "catalog/pg_authid.h" diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index f970cecd1..989e957af 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -11,7 +11,7 @@ #include "postgres.h" #include "distributed/commands.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/heapam.h" diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index af8354ee3..afdaf57c1 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" @@ -55,6 +55,7 @@ static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress); static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, Datum *paramValues); +static bool IsObjectDistributed(const ObjectAddress *address); PG_FUNCTION_INFO_V1(citus_unmark_object_distributed); PG_FUNCTION_INFO_V1(master_unmark_object_distributed); @@ -150,7 +151,7 @@ ObjectExists(const ObjectAddress *address) /* * MarkObjectDistributed marks an object as a distributed object. Marking is done * by adding appropriate entries to citus.pg_dist_object and also marking the object - * as distributed by opening a connection using current user to all of the workers + * as distributed by opening a connection using current user to all remote nodes * with metadata if object propagation is on. * * This function should be used if the user creating the given object. If you want @@ -165,7 +166,7 @@ MarkObjectDistributed(const ObjectAddress *distAddress) { char *workerPgDistObjectUpdateCommand = CreatePgDistObjectEntryCommand(distAddress); - SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand); + SendCommandToRemoteNodesWithMetadata(workerPgDistObjectUpdateCommand); } } @@ -173,7 +174,7 @@ MarkObjectDistributed(const ObjectAddress *distAddress) /* * MarkObjectDistributedViaSuperUser marks an object as a distributed object. Marking * is done by adding appropriate entries to citus.pg_dist_object and also marking the - * object as distributed by opening a connection using super user to all of the workers + * object as distributed by opening a connection using super user to all remote nodes * with metadata if object propagation is on. * * This function should be used to mark dependent object as distributed. 
If you want @@ -188,7 +189,7 @@ MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress) { char *workerPgDistObjectUpdateCommand = CreatePgDistObjectEntryCommand(distAddress); - SendCommandToWorkersWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand); + SendCommandToRemoteNodesWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand); } } @@ -358,8 +359,12 @@ ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, } +/* + * UnmarkNodeWideObjectsDistributed deletes all pg_dist_object records for + * distributed roles in a `DROP ROLE` statement and for all databases in a + * `DROP DATABASE` statement. + */ void -UnmarkRolesAndDatabaseDistributed(Node *node) +UnmarkNodeWideObjectsDistributed(Node *node) { if (IsA(node, DropRoleStmt)) { @@ -378,9 +383,9 @@ UnmarkRolesAndDatabaseDistributed(Node *node) char *dbName = stmt->dbname; Oid dbOid = get_database_oid(dbName, stmt->missing_ok); - ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress)); - ObjectAddressSet(*dbAddress, DatabaseRelationId, dbOid); - UnmarkObjectDistributed(dbAddress); + ObjectAddress *dbObjectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*dbObjectAddress, DatabaseRelationId, dbOid); + UnmarkObjectDistributed(dbObjectAddress); } } @@ -420,7 +425,7 @@ UnmarkObjectDistributed(const ObjectAddress *address) * IsObjectDistributed returns if the object addressed is already distributed in the * cluster. This performs a local indexed lookup in pg_dist_object. */ -bool +static bool IsObjectDistributed(const ObjectAddress *address) { ScanKeyData key[3]; diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 85a945308..44179cffb 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -8,7 +8,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_version_compat.h" #include "stdint.h" diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 54fa801ae..d83706535 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -30,12 +30,15 @@ #include "catalog/pg_attrdef.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" +#include "catalog/pg_database.h" +#include "catalog/pg_database_d.h" #include "catalog/pg_depend.h" #include "catalog/pg_foreign_server.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/async.h" +#include "commands/dbcommands.h" #include "distributed/argutils.h" #include "distributed/backend_data.h" #include "distributed/citus_ruleutils.h" @@ -134,7 +137,7 @@ static bool ShouldSkipMetadataChecks(void); static void EnsurePartitionMetadataIsSane(Oid relationId, char distributionMethod, int colocationId, char replicationModel, Var *distributionKey); -static void EnsureCoordinatorInitiatedOperation(void); +static void EnsureCitusInitiatedOperation(void); static void EnsureShardMetadataIsSane(Oid relationId, int64 shardId, char storageType, text *shardMinValue, text *shardMaxValue); @@ -179,6 +182,7 @@ PG_FUNCTION_INFO_V1(citus_internal_delete_colocation_metadata); PG_FUNCTION_INFO_V1(citus_internal_add_tenant_schema); PG_FUNCTION_INFO_V1(citus_internal_delete_tenant_schema); PG_FUNCTION_INFO_V1(citus_internal_update_none_dist_table_metadata); +PG_FUNCTION_INFO_V1(citus_internal_database_command); static bool got_SIGTERM
= false; @@ -1001,7 +1005,7 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* * Ensure given distributionArgumentIndex and colocationId values are @@ -3090,7 +3094,7 @@ citus_internal_add_partition_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); if (distributionMethod == DISTRIBUTE_BY_NONE && distributionColumnVar != NULL) { @@ -3206,7 +3210,7 @@ citus_internal_delete_partition_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } DeletePartitionRow(relationId); @@ -3254,7 +3258,7 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* * Even if the table owner is a malicious user and the shard metadata is @@ -3272,19 +3276,13 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS) /* - * EnsureCoordinatorInitiatedOperation is a helper function which ensures that - * the execution is initiated by the coordinator on a worker node. + * EnsureCitusInitiatedOperation is a helper function which ensures that + * the execution is initiated by Citus. */ static void -EnsureCoordinatorInitiatedOperation(void) +EnsureCitusInitiatedOperation(void) { - /* - * We are restricting the operation to only MX workers with the local group id - * check. The other two checks are to ensure that the operation is initiated - * by the coordinator. 
- */ - if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend()) || - GetLocalGroupId() == COORDINATOR_GROUP_ID) + if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend())) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("This is an internal Citus function can only be " @@ -3465,7 +3463,7 @@ citus_internal_delete_placement_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } DeleteShardPlacementRow(placementId); @@ -3513,7 +3511,7 @@ citus_internal_add_placement_metadata_internal(int64 shardId, int64 shardLength, if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* * Even if the table owner is a malicious user, as long as the shard placements @@ -3608,7 +3606,7 @@ citus_internal_update_placement_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); if (!ShardExists(shardId)) { @@ -3672,7 +3670,7 @@ citus_internal_delete_shard_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); if (!ShardExists(shardId)) { @@ -3715,7 +3713,7 @@ citus_internal_update_relation_colocation(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* ensure that the table is in pg_dist_partition */ char partitionMethod = PartitionMethodViaCatalog(relationId); @@ -3781,7 +3779,7 @@ citus_internal_add_colocation_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } InsertColocationGroupLocally(colocationId, shardCount, replicationFactor, @@ -3806,7 +3804,7 @@ citus_internal_delete_colocation_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } DeleteColocationGroupLocally(colocationId); @@ -3885,7 +3883,7 @@ citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } UpdateNoneDistTableMetadata(relationId, replicationModel, @@ -3895,6 +3893,80 @@ citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS) } +/* + * citus_internal_database_command is an internal UDF to + * create/drop a database in an idempotent manner without + * transaction block restrictions.
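+ *
+ * Illustrative invocation (database name and owner are hypothetical):
+ *
+ *   SELECT pg_catalog.citus_internal_database_command(
+ *            'CREATE DATABASE db1 OWNER db_owner');
+ *
+ * The command is parsed and executed locally with
+ * citus.enable_ddl_propagation and citus.enable_create_database_propagation
+ * turned off, and it is a no-op if the database already exists (or, for
+ * DROP, is already gone).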
+ */ +Datum +citus_internal_database_command(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + if (!ShouldSkipMetadataChecks()) + { + EnsureCitusInitiatedOperation(); + } + + PG_ENSURE_ARGNOTNULL(0, "database command"); + + text *commandText = PG_GETARG_TEXT_P(0); + char *command = text_to_cstring(commandText); + Node *parseTree = ParseTreeNode(command); + + int saveNestLevel = NewGUCNestLevel(); + + set_config_option("citus.enable_ddl_propagation", "off", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + + set_config_option("citus.enable_create_database_propagation", "off", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + + /* + * createdb() / DropDatabase() use ParseState to report the error position for the + * input command and the position is reported to be 0 when it's provided as NULL. + * We're okay with that because we don't expect this UDF to be called with an incorrect + * DDL command. + */ + ParseState *pstate = NULL; + + if (IsA(parseTree, CreatedbStmt)) + { + CreatedbStmt *stmt = castNode(CreatedbStmt, parseTree); + + bool missingOk = true; + Oid databaseOid = get_database_oid(stmt->dbname, missingOk); + + if (!OidIsValid(databaseOid)) + { + createdb(pstate, (CreatedbStmt *) parseTree); + } + } + else if (IsA(parseTree, DropdbStmt)) + { + DropdbStmt *stmt = castNode(DropdbStmt, parseTree); + + /* stay idempotent: skip the drop if the database is already gone */ + bool missingOk = true; + Oid databaseOid = get_database_oid(stmt->dbname, missingOk); + + if (OidIsValid(databaseOid)) + { + DropDatabase(pstate, (DropdbStmt *) parseTree); + } + } + else + { + ereport(ERROR, (errmsg("unsupported command type %d", nodeTag(parseTree)))); + } + + /* roll back the GUC changes made above */ + AtEOXact_GUC(true, saveNestLevel); + + PG_RETURN_VOID(); +} + + /* * SyncNewColocationGroup synchronizes a new pg_dist_colocation entry to a worker.
*/ @@ -4503,7 +4575,7 @@ PropagateNodeWideObjectsCommandList(void) if (EnableCreateDatabasePropagation) { - /* Get commands for database creation */ + /* get commands for database creation */ List *createDatabaseCommands = GenerateCreateDatabaseCommandList(); ddlCommands = list_concat(ddlCommands, createDatabaseCommands); } diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index ae0f6589a..0d9963c12 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -17,13 +17,14 @@ #include "libpq-fe.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" #include "catalog/dependency.h" +#include "catalog/index.h" #include "catalog/indexing.h" #include "catalog/pg_authid.h" #include "catalog/pg_constraint.h" @@ -88,11 +89,11 @@ static uint64 * AllocateUint64(uint64 value); static void RecordDistributedRelationDependencies(Oid distributedRelationId); static GroupShardPlacement * TupleToGroupShardPlacement(TupleDesc tupleDesc, HeapTuple heapTuple); -static bool DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, - bool failOnError, uint64 *tableSize); -static bool DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, - SizeQueryType sizeQueryType, bool failOnError, - uint64 *tableSize); +static bool DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType, + bool failOnError, uint64 *relationSize); +static bool DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId, + SizeQueryType sizeQueryType, bool failOnError, + uint64 *relationSize); static List * ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId); static char * GenerateShardIdNameValuesForShardList(List *shardIntervalList, bool firstValue); @@ -282,7 +283,7 @@ citus_shard_sizes(PG_FUNCTION_ARGS) /* - * citus_total_relation_size accepts a table name and returns a distributed table + * citus_total_relation_size accepts a distributed table name and returns a distributed table * and its indexes' total relation size. */ Datum @@ -294,20 +295,20 @@ citus_total_relation_size(PG_FUNCTION_ARGS) bool failOnError = PG_GETARG_BOOL(1); SizeQueryType sizeQueryType = TOTAL_RELATION_SIZE; - uint64 tableSize = 0; + uint64 relationSize = 0; - if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize)) + if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize)) { Assert(!failOnError); PG_RETURN_NULL(); } - PG_RETURN_INT64(tableSize); + PG_RETURN_INT64(relationSize); } /* - * citus_table_size accepts a table name and returns a distributed table's total + * citus_table_size accepts a distributed table name and returns a distributed table's total * relation size. */ Datum @@ -318,21 +319,24 @@ citus_table_size(PG_FUNCTION_ARGS) Oid relationId = PG_GETARG_OID(0); bool failOnError = true; SizeQueryType sizeQueryType = TABLE_SIZE; - uint64 tableSize = 0; + uint64 relationSize = 0; - if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize)) + /* Unlike PostgreSQL, we do not check whether the relation is really a table.
*/ + if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize)) { Assert(!failOnError); PG_RETURN_NULL(); } - PG_RETURN_INT64(tableSize); + PG_RETURN_INT64(relationSize); } /* - * citus_relation_size accept a table name and returns a relation's 'main' + * citus_relation_size accepts a distributed relation name and returns a relation's 'main' * fork's size. + * + * Input relation is allowed to be an index on a distributed table too. */ Datum citus_relation_size(PG_FUNCTION_ARGS) { @@ -344,7 +348,7 @@ citus_relation_size(PG_FUNCTION_ARGS) SizeQueryType sizeQueryType = RELATION_SIZE; uint64 relationSize = 0; - if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &relationSize)) + if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize)) { Assert(!failOnError); PG_RETURN_NULL(); @@ -506,13 +510,16 @@ ReceiveShardIdAndSizeResults(List *connectionList, Tuplestorestate *tupleStore, /* - * DistributedTableSize is helper function for each kind of citus size functions. - * It first checks whether the table is distributed and size query can be run on - * it. Connection to each node has to be established to get the size of the table. + * DistributedRelationSize is a helper function for each kind of citus size + * function. It first checks whether the relation is a distributed table or an + * index belonging to a distributed table, and whether a size query can be run + * on it. A connection to each node has to be established to get the size of + * the relation. + * Input relation is allowed to be an index on a distributed table too. */ static bool -DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnError, - uint64 *tableSize) +DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType, + bool failOnError, uint64 *relationSize) { int logLevel = WARNING; @@ -538,7 +545,7 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr if (relation == NULL) { ereport(logLevel, - (errmsg("could not compute table size: relation does not exist"))); + (errmsg("could not compute relation size: relation does not exist"))); return false; } @@ -553,8 +560,9 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr { uint64 relationSizeOnNode = 0; - bool gotSize = DistributedTableSizeOnWorker(workerNode, relationId, sizeQueryType, - failOnError, &relationSizeOnNode); + bool gotSize = DistributedRelationSizeOnWorker(workerNode, relationId, + sizeQueryType, + failOnError, &relationSizeOnNode); if (!gotSize) { return false; @@ -563,21 +571,22 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr sumOfSizes += relationSizeOnNode; } - *tableSize = sumOfSizes; + *relationSize = sumOfSizes; return true; } /* - * DistributedTableSizeOnWorker gets the workerNode and relationId to calculate + * DistributedRelationSizeOnWorker gets the workerNode and relationId to calculate * size of that relation on the given workerNode by summing up the size of each * shard placement. + * Input relation is allowed to be an index on a distributed table too.
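+ *
+ * For example (illustrative), with sizeQueryType == TOTAL_RELATION_SIZE the
+ * query sent to the worker applies pg_total_relation_size() to each shard
+ * placement of the relation (or of the given index) on that node and returns
+ * a single total, which is parsed into *relationSize below.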
*/ static bool -DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, - SizeQueryType sizeQueryType, - bool failOnError, uint64 *tableSize) +DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId, + SizeQueryType sizeQueryType, + bool failOnError, uint64 *relationSize) { int logLevel = WARNING; @@ -591,6 +600,17 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, uint32 connectionFlag = 0; PGresult *result = NULL; + /* if the relation is an index, update relationId and define indexId */ + Oid indexId = InvalidOid; + Oid relKind = get_rel_relkind(relationId); + if (relKind == RELKIND_INDEX || relKind == RELKIND_PARTITIONED_INDEX) + { + indexId = relationId; + + bool missingOk = false; + relationId = IndexGetRelation(indexId, missingOk); + } + List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode, relationId); /* @@ -598,21 +618,22 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, * But citus size functions shouldn't include them, like PG. */ bool optimizePartitionCalculations = false; - StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements( + StringInfo relationSizeQuery = GenerateSizeQueryOnMultiplePlacements( shardIntervalsOnNode, + indexId, sizeQueryType, optimizePartitionCalculations); MultiConnection *connection = GetNodeConnection(connectionFlag, workerNodeName, workerNodePort); - int queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data, + int queryResult = ExecuteOptionalRemoteCommand(connection, relationSizeQuery->data, &result); if (queryResult != 0) { ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("could not connect to %s:%d to get size of " - "table \"%s\"", + "relation \"%s\"", workerNodeName, workerNodePort, get_rel_name(relationId)))); @@ -626,19 +647,19 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, ClearResults(connection, failOnError); ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("cannot parse size of table \"%s\" from %s:%d", + errmsg("cannot parse size of relation \"%s\" from %s:%d", get_rel_name(relationId), workerNodeName, workerNodePort))); return false; } - StringInfo tableSizeStringInfo = (StringInfo) linitial(sizeList); - char *tableSizeString = tableSizeStringInfo->data; + StringInfo relationSizeStringInfo = (StringInfo) linitial(sizeList); + char *relationSizeString = relationSizeStringInfo->data; - if (strlen(tableSizeString) > 0) + if (strlen(relationSizeString) > 0) { - *tableSize = SafeStringToUint64(tableSizeString); + *relationSize = SafeStringToUint64(relationSizeString); } else { @@ -647,7 +668,7 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, * being executed. For this case we get an empty string as table size. * We can take that as zero to prevent any unnecessary errors. */ - *tableSize = 0; + *relationSize = 0; } PQclear(result); @@ -732,7 +753,7 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId) /* * GenerateSizeQueryOnMultiplePlacements generates a select size query to get - * size of multiple tables. Note that, different size functions supported by PG + * size of multiple relations. Note that, different size functions supported by PG * are also supported by this function changing the size query type given as the * last parameter to function. 
Depending on the sizeQueryType enum parameter, the * generated query will call one of the functions: pg_relation_size, @@ -740,9 +761,13 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId) * This function uses UDFs named worker_partitioned_*_size for partitioned tables, * if the parameter optimizePartitionCalculations is true. The UDF to be called is * determined by the parameter sizeQueryType. + * + * indexId is provided if we're interested in the size of an index, not the whole + * table. */ StringInfo GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList, + Oid indexId, SizeQueryType sizeQueryType, bool optimizePartitionCalculations) { @@ -766,16 +791,20 @@ GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList, */ continue; } + + /* we need to build the shard relation name, which may be an index or a table */ + Oid objectId = OidIsValid(indexId) ? indexId : shardInterval->relationId; + uint64 shardId = shardInterval->shardId; - Oid schemaId = get_rel_namespace(shardInterval->relationId); + Oid schemaId = get_rel_namespace(objectId); char *schemaName = get_namespace_name(schemaId); - char *shardName = get_rel_name(shardInterval->relationId); + char *shardName = get_rel_name(objectId); AppendShardIdToName(&shardName, shardId); char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName); char *quotedShardName = quote_literal_cstr(shardQualifiedName); - /* for partitoned tables, we will call worker_partitioned_... size functions */ + /* for partitioned tables, we will call worker_partitioned_... size functions */ if (optimizePartitionCalculations && PartitionedTable(shardInterval->relationId)) { partitionedShardNames = lappend(partitionedShardNames, quotedShardName); @@ -1010,7 +1039,7 @@ AppendShardIdNameValues(StringInfo selectQuery, ShardInterval *shardInterval) /* - * ErrorIfNotSuitableToGetSize determines whether the table is suitable to find + * ErrorIfNotSuitableToGetSize determines whether the relation is suitable to find * its' size with internal functions.
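+ *
+ * For example (illustrative), calling citus_relation_size() on a plain local
+ * table errors out below with "cannot calculate the size because relation
+ * 'local_table' is not distributed".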
*/ static void @@ -1018,11 +1047,32 @@ ErrorIfNotSuitableToGetSize(Oid relationId) { if (!IsCitusTable(relationId)) { - char *relationName = get_rel_name(relationId); - char *escapedQueryString = quote_literal_cstr(relationName); - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("cannot calculate the size because relation %s is not " - "distributed", escapedQueryString))); + Oid relKind = get_rel_relkind(relationId); + if (relKind != RELKIND_INDEX && relKind != RELKIND_PARTITIONED_INDEX) + { + char *relationName = get_rel_name(relationId); + char *escapedRelationName = quote_literal_cstr(relationName); + ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg( + "cannot calculate the size because relation %s " + "is not distributed", + escapedRelationName))); + } + bool missingOk = false; + Oid indexId = relationId; + relationId = IndexGetRelation(relationId, missingOk); + if (!IsCitusTable(relationId)) + { + char *tableName = get_rel_name(relationId); + char *escapedTableName = quote_literal_cstr(tableName); + char *indexName = get_rel_name(indexId); + char *escapedIndexName = quote_literal_cstr(indexName); + ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg( + "cannot calculate the size because table %s for " + "index %s is not distributed", + escapedTableName, escapedIndexName))); + } } } diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index a73f2e9d2..041c6dcc4 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -2742,6 +2742,25 @@ EnsureCoordinator(void) } +/* + * EnsurePropagationToCoordinator checks whether the coordinator is added to the + * metadata if we're not on the coordinator. + * + * Given that metadata syncing skips syncing metadata to the coordinator, we need + * to make sure that the coordinator is added to the metadata before propagating + * a command from a worker. For this reason, today we use this only for the commands + * that we support propagating from workers. + */ +void +EnsurePropagationToCoordinator(void) +{ + if (!IsCoordinator()) + { + EnsureCoordinatorIsInMetadata(); + } +} + + /* * EnsureCoordinatorIsInMetadata checks whether the coordinator is added to the * metadata, which is required for many operations.
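A minimal sketch of how the new guard above is meant to be used. The handler name is hypothetical; EnsurePropagationToCoordinator() comes from the hunk above, while DeparseTreeNode(), DISABLE_DDL_PROPAGATION and ENABLE_DDL_PROPAGATION are recalled from the Citus tree and should be treated as assumptions:

/*
 * Illustrative only: a handler for a command that may originate on a worker
 * first makes sure the coordinator is in the metadata, then ships the
 * deparsed command wrapped in the usual propagation guards.
 */
static List *
PreprocessSomeNodeWideCommand(Node *node)
{
	EnsurePropagationToCoordinator();

	char *sql = DeparseTreeNode(node);
	return list_make3(DISABLE_DDL_PROPAGATION, sql, ENABLE_DDL_PROPAGATION);
}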
diff --git a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c index 54f764fc1..a7f40e2ad 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c +++ b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c @@ -24,7 +24,7 @@ #include "distributed/citus_safe_lib.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/version_compat.h" #include "nodes/value.h" #include "utils/array.h" diff --git a/src/backend/distributed/operations/create_shards.c b/src/backend/distributed/operations/create_shards.c index d0fcc9612..8bc3b249f 100644 --- a/src/backend/distributed/operations/create_shards.c +++ b/src/backend/distributed/operations/create_shards.c @@ -158,13 +158,6 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, "replication factor."))); } - /* if we have enough nodes, add an extra placement attempt for backup */ - uint32 placementAttemptCount = (uint32) replicationFactor; - if (workerNodeCount > replicationFactor) - { - placementAttemptCount++; - } - /* set shard storage type according to relation type */ char shardStorageType = ShardStorageType(distributedTableId); diff --git a/src/backend/distributed/operations/delete_protocol.c b/src/backend/distributed/operations/delete_protocol.c index abed39272..54cb568be 100644 --- a/src/backend/distributed/operations/delete_protocol.c +++ b/src/backend/distributed/operations/delete_protocol.c @@ -15,7 +15,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/operations/modify_multiple_shards.c b/src/backend/distributed/operations/modify_multiple_shards.c index 8def1b26e..8d596a10b 100644 --- a/src/backend/distributed/operations/modify_multiple_shards.c +++ b/src/backend/distributed/operations/modify_multiple_shards.c @@ -14,7 +14,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "funcapi.h" #include "libpq-fe.h" diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index a3f7092d1..eeaf34321 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index e3ee4aa4d..d339ac56a 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -17,7 +17,7 @@ #include -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/htup_details.h" #include "access/genam.h" diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c index 23925a315..79895cc3d 100644 --- a/src/backend/distributed/operations/shard_transfer.c +++ b/src/backend/distributed/operations/shard_transfer.c @@ -792,7 +792,12 @@ ShardListSizeInBytes(List *shardList, char *workerNodeName, uint32 /* we skip child tables of a partitioned table if this boolean 
variable is true */ bool optimizePartitionCalculations = true; + + /* we're interested in the whole table, not a particular index */ + Oid indexId = InvalidOid; + StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(shardList, + indexId, TOTAL_RELATION_SIZE, optimizePartitionCalculations); diff --git a/src/backend/distributed/operations/worker_node_manager.c b/src/backend/distributed/operations/worker_node_manager.c index 76f2732ba..e616770dd 100644 --- a/src/backend/distributed/operations/worker_node_manager.c +++ b/src/backend/distributed/operations/worker_node_manager.c @@ -180,7 +180,7 @@ ActivePrimaryNodeList(LOCKMODE lockMode) /* * ActivePrimaryRemoteNodeList returns a list of all active primary nodes in - * workerNodeHash. + * workerNodeHash except the local one. */ List * ActivePrimaryRemoteNodeList(LOCKMODE lockMode) diff --git a/src/backend/distributed/planner/combine_query_planner.c b/src/backend/distributed/planner/combine_query_planner.c index e61ff8daf..6a171dac1 100644 --- a/src/backend/distributed/planner/combine_query_planner.c +++ b/src/backend/distributed/planner/combine_query_planner.c @@ -11,7 +11,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_type.h" #include "distributed/citus_ruleutils.h" diff --git a/src/backend/distributed/planner/cte_inline.c b/src/backend/distributed/planner/cte_inline.c index ce258916d..9a1bbab96 100644 --- a/src/backend/distributed/planner/cte_inline.c +++ b/src/backend/distributed/planner/cte_inline.c @@ -13,7 +13,7 @@ */ #include "postgres.h" #include "pg_version_compat.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/cte_inline.h" #include "nodes/nodeFuncs.h" diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 65278d1ea..7ad419f0a 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -9,7 +9,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "funcapi.h" @@ -702,6 +702,7 @@ DissuadePlannerFromUsingPlan(PlannedStmt *plan) * Arbitrarily high cost, but low enough that it can be added up * without overflowing by choose_custom_plan().
*/ + Assert(plan != NULL); plan->planTree->total_cost = FLT_MAX / 100000000; } diff --git a/src/backend/distributed/planner/extended_op_node_utils.c b/src/backend/distributed/planner/extended_op_node_utils.c index 0a2a8b834..bb87b6949 100644 --- a/src/backend/distributed/planner/extended_op_node_utils.c +++ b/src/backend/distributed/planner/extended_op_node_utils.c @@ -9,7 +9,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/extended_op_node_utils.h" #include "distributed/listutils.h" diff --git a/src/backend/distributed/planner/fast_path_router_planner.c b/src/backend/distributed/planner/fast_path_router_planner.c index ed256296c..1d58911eb 100644 --- a/src/backend/distributed/planner/fast_path_router_planner.c +++ b/src/backend/distributed/planner/fast_path_router_planner.c @@ -34,7 +34,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/distributed_planner.h" #include "distributed/insert_select_planner.h" diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index 2f8da29c0..bacbe16af 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" @@ -525,8 +525,16 @@ ShardPlacementForFunctionColocatedWithDistTable(DistObjectCacheEntry *procedure, if (partitionParam->paramkind == PARAM_EXTERN) { - /* Don't log a message, we should end up here again without a parameter */ - DissuadePlannerFromUsingPlan(plan); + /* + * Don't log a message, we should end up here again without a + * parameter. + * Note that "plan" can be null, for example when a CALL statement + * is prepared. 
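+ * (Illustrative: a client that prepares CALL delegated_proc($1) through the
+ * extended query protocol can reach this path with plan == NULL.)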
+ */ + if (plan) + { + DissuadePlannerFromUsingPlan(plan); + } return NULL; } } diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 1b7f468f8..dd4bee90f 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" diff --git a/src/backend/distributed/planner/local_distributed_join_planner.c b/src/backend/distributed/planner/local_distributed_join_planner.c index d93921966..1867a790c 100644 --- a/src/backend/distributed/planner/local_distributed_join_planner.c +++ b/src/backend/distributed/planner/local_distributed_join_planner.c @@ -71,7 +71,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "funcapi.h" diff --git a/src/backend/distributed/planner/local_plan_cache.c b/src/backend/distributed/planner/local_plan_cache.c index 946d9fc46..1ac8e24a3 100644 --- a/src/backend/distributed/planner/local_plan_cache.c +++ b/src/backend/distributed/planner/local_plan_cache.c @@ -9,7 +9,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index 3cadea23a..5c593d153 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -29,7 +29,7 @@ #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_node_metadata.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/query_pushdown_planning.h" #include "distributed/query_colocation_checker.h" #include "distributed/repartition_executor.h" diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index 94d125f41..bf9a1871e 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -11,7 +11,7 @@ #include "libpq-fe.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/htup_details.h" #include "access/xact.h" diff --git a/src/backend/distributed/planner/multi_join_order.c b/src/backend/distributed/planner/multi_join_order.c index 7714a1e08..0eede6b9b 100644 --- a/src/backend/distributed/planner/multi_join_order.c +++ b/src/backend/distributed/planner/multi_join_order.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index 455f050a0..9001d724d 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/planner/multi_logical_planner.c b/src/backend/distributed/planner/multi_logical_planner.c index 0969e0c7c..d6897d17b 100644 --- 
a/src/backend/distributed/planner/multi_logical_planner.c +++ b/src/backend/distributed/planner/multi_logical_planner.c @@ -14,7 +14,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/heapam.h" #include "access/nbtree.h" diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index 21befa6f2..aa2c2b5b4 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include #include diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index e70de5bbd..c0930ca34 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c index 77baab197..fd1df1be9 100644 --- a/src/backend/distributed/planner/query_colocation_checker.c +++ b/src/backend/distributed/planner/query_colocation_checker.c @@ -21,7 +21,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/relation.h" #include "distributed/multi_logical_planner.h" diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c index 3bad73459..8ccc35c82 100644 --- a/src/backend/distributed/planner/query_pushdown_planning.c +++ b/src/backend/distributed/planner/query_pushdown_planning.c @@ -21,7 +21,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/citus_clauses.h" #include "distributed/citus_ruleutils.h" diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index c2426cf5f..d16280662 100644 --- a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -48,7 +48,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "funcapi.h" diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index 368ba2026..4b51a537d 100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -10,7 +10,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/colocation_utils.h" #include "distributed/distributed_planner.h" diff --git a/src/backend/distributed/planner/shard_pruning.c b/src/backend/distributed/planner/shard_pruning.c index 5375a70fa..ef244ea66 100644 --- a/src/backend/distributed/planner/shard_pruning.c +++ b/src/backend/distributed/planner/shard_pruning.c @@ -66,7 +66,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "fmgr.h" diff --git 
a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index f66e309ab..97f6fdb3d 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -15,7 +15,7 @@ #include "pgstat.h" #include "libpq-fe.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 32ad4c427..f06e0f2b0 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -1266,7 +1266,7 @@ RegisterCitusConfigVariables(void) DefineCustomBoolVariable( "citus.enable_create_database_propagation", gettext_noop("Enables propagating CREATE DATABASE " - "and DROP DATABASE statements to workers"), + "and DROP DATABASE statements to workers."), NULL, &EnableCreateDatabasePropagation, false, diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql index 232e3ad14..b20f6278e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql @@ -4,7 +4,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text) RETURNS void LANGUAGE C - STRICT + VOLATILE AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS 'run a database command without transaction block restrictions'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql index 232e3ad14..b20f6278e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql @@ -4,7 +4,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text) RETURNS void LANGUAGE C - STRICT + VOLATILE AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS 'run a database command without transaction block restrictions'; diff --git a/src/backend/distributed/test/fake_am.c b/src/backend/distributed/test/fake_am.c index 8a723e4c4..4b11d7871 100644 --- a/src/backend/distributed/test/fake_am.c +++ b/src/backend/distributed/test/fake_am.c @@ -19,7 +19,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_version_compat.h" diff --git a/src/backend/distributed/test/fake_fdw.c b/src/backend/distributed/test/fake_fdw.c index 4784248c0..f53242f7f 100644 --- a/src/backend/distributed/test/fake_fdw.c +++ b/src/backend/distributed/test/fake_fdw.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/test/metadata_sync.c b/src/backend/distributed/test/metadata_sync.c index 46d2303d6..8ad4b15f2 100644 --- a/src/backend/distributed/test/metadata_sync.c +++ b/src/backend/distributed/test/metadata_sync.c @@ -90,6 +90,28 @@ activate_node_snapshot(PG_FUNCTION_ARGS) } +/* + * IsMetadataSynced checks the workers to see if all 
workers with metadata are + * synced. + */ +static bool +IsMetadataSynced(void) +{ + List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock); + + WorkerNode *workerNode = NULL; + foreach_ptr(workerNode, workerList) + { + if (workerNode->hasMetadata && !workerNode->metadataSynced) + { + return false; + } + } + + return true; +} + + /* * wait_until_metadata_sync waits until the maintenance daemon does a metadata * sync, or times out. @@ -99,19 +121,10 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS) { uint32 timeout = PG_GETARG_UINT32(0); - List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock); - bool waitNotifications = false; - - WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerList) - { - /* if already has metadata, no need to do it again */ - if (workerNode->hasMetadata && !workerNode->metadataSynced) - { - waitNotifications = true; - break; - } - } + /* First we start listening. */ + MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION, + LOCAL_HOST_NAME, PostPortNumber); + ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL); /* * If all the metadata nodes have already been synced, we should not wait. @@ -119,15 +132,12 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS) * the notification and we'd wait unnecessarily here. Worse, the test outputs * might be inconsistent across executions due to the warning. */ - if (!waitNotifications) + if (IsMetadataSynced()) { + CloseConnection(connection); PG_RETURN_VOID(); } - MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION, - LOCAL_HOST_NAME, PostPortNumber); - ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL); - int waitFlags = WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH; int waitResult = WaitLatchOrSocket(NULL, waitFlags, PQsocket(connection->pgConn), timeout, 0); @@ -139,7 +149,7 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS) { ClearResults(connection, true); } - else if (waitResult & WL_TIMEOUT) + else if (waitResult & WL_TIMEOUT && !IsMetadataSynced()) { elog(WARNING, "waiting for metadata sync timed out"); } diff --git a/src/backend/distributed/test/prune_shard_list.c b/src/backend/distributed/test/prune_shard_list.c index a9f5e4a88..023a759cb 100644 --- a/src/backend/distributed/test/prune_shard_list.c +++ b/src/backend/distributed/test/prune_shard_list.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index 3e2ea5ca1..c1981b77a 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" #include "unistd.h" diff --git a/src/backend/distributed/transaction/relation_access_tracking.c b/src/backend/distributed/transaction/relation_access_tracking.c index b0af4e476..3ad61ac79 100644 --- a/src/backend/distributed/transaction/relation_access_tracking.c +++ b/src/backend/distributed/transaction/relation_access_tracking.c @@ -15,7 +15,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" diff --git a/src/backend/distributed/transaction/transaction_recovery.c b/src/backend/distributed/transaction/transaction_recovery.c index 
b46419dc2..a833f5a46 100644 --- a/src/backend/distributed/transaction/transaction_recovery.c +++ b/src/backend/distributed/transaction/transaction_recovery.c @@ -14,7 +14,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" #include "libpq-fe.h" diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 03ecbea72..3399365aa 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -34,6 +34,12 @@ #include "utils/memutils.h" #include "utils/builtins.h" +static void SendCommandToRemoteMetadataNodesParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues); +static void SendBareCommandListToMetadataNodesInternal(List *commandList, + TargetWorkerSet targetWorkerSet); static void SendCommandToMetadataWorkersParams(const char *command, const char *user, int parameterCount, const Oid *parameterTypes, @@ -150,6 +156,74 @@ SendCommandListToWorkersWithMetadata(List *commands) } + +/* + * SendCommandToRemoteNodesWithMetadata sends a command to remote nodes in + * parallel. Commands are committed on the nodes when the local transaction + * commits. + */ +void +SendCommandToRemoteNodesWithMetadata(const char *command) +{ + SendCommandToRemoteMetadataNodesParams(command, CurrentUserName(), + 0, NULL, NULL); +} + + +/* + * SendCommandToRemoteNodesWithMetadataViaSuperUser sends a command to remote + * nodes in parallel by opening a super user connection. Commands are committed + * on the nodes when the local transaction commits. The connections are made as + * the extension owner to ensure write access to the Citus metadata tables. + * + * Since we prevent opening superuser connections for metadata tables, using + * this function is discouraged. Consider using it only for propagating pg_dist_object + * tuples for dependent objects. + */ +void +SendCommandToRemoteNodesWithMetadataViaSuperUser(const char *command) +{ + SendCommandToRemoteMetadataNodesParams(command, CitusExtensionOwnerName(), + 0, NULL, NULL); +} + + +/* + * SendCommandListToRemoteNodesWithMetadata sends all commands to remote nodes + * with the current user. See `SendCommandToRemoteNodesWithMetadata` for details. + */ +void +SendCommandListToRemoteNodesWithMetadata(List *commands) +{ + char *command = NULL; + foreach_ptr(command, commands) + { + SendCommandToRemoteNodesWithMetadata(command); + } +} + + +/* + * SendCommandToRemoteMetadataNodesParams is a wrapper around + * SendCommandToWorkersParamsInternal() that can be used to send commands + * to remote metadata nodes. + */ +static void +SendCommandToRemoteMetadataNodesParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues) +{ + /* use METADATA_NODES so that ErrorIfAnyMetadataNodeOutOfSync checks local node as well */ + List *workerNodeList = TargetWorkerSetNodeList(METADATA_NODES, + RowShareLock); + + ErrorIfAnyMetadataNodeOutOfSync(workerNodeList); + + SendCommandToWorkersParamsInternal(REMOTE_METADATA_NODES, command, user, + parameterCount, parameterTypes, parameterValues); +} + + /* * TargetWorkerSetNodeList returns a list of WorkerNode's that satisfies the * TargetWorkerSet.
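A note on the STRICT to VOLATILE change to citus_internal_database_command earlier in this patch: in PostgreSQL, a STRICT function is never invoked when any argument is NULL (the call immediately yields NULL), so dropping STRICT presumably lets the C function run and report its own error for NULL input, while VOLATILE simply states the default volatility explicitly. A minimal sketch of the difference, using throwaway functions rather than the Citus UDF:

    CREATE FUNCTION strict_demo(text) RETURNS text
        LANGUAGE sql STRICT AS $$ SELECT 'executed' $$;
    SELECT strict_demo(NULL);    -- returns NULL; the body never runs

    CREATE FUNCTION nonstrict_demo(text) RETURNS text
        LANGUAGE sql VOLATILE AS $$ SELECT 'executed' $$;
    SELECT nonstrict_demo(NULL); -- returns 'executed'; the body must handle NULL itself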
@@ -162,17 +236,29 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) { workerNodeList = ActivePrimaryNodeList(lockMode); } - else + else if (targetWorkerSet == REMOTE_NODES || targetWorkerSet == REMOTE_METADATA_NODES) + { + workerNodeList = ActivePrimaryRemoteNodeList(lockMode); + } + else if (targetWorkerSet == NON_COORDINATOR_METADATA_NODES || + targetWorkerSet == NON_COORDINATOR_NODES) { workerNodeList = ActivePrimaryNonCoordinatorNodeList(lockMode); } + else + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid target worker set: %d", targetWorkerSet))); + } + List *result = NIL; WorkerNode *workerNode = NULL; foreach_ptr(workerNode, workerNodeList) { - if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || targetWorkerSet == - METADATA_NODES) && + if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || + targetWorkerSet == REMOTE_METADATA_NODES || + targetWorkerSet == METADATA_NODES) && !workerNode->hasMetadata) { continue; @@ -186,16 +272,42 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) /* - * SendBareCommandListToMetadataWorkers sends a list of commands to metadata - * workers in serial. Commands are committed immediately: new connections are - * always used and no transaction block is used (hence "bare"). The connections - * are made as the extension owner to ensure write access to the Citus metadata - * tables. Primarly useful for INDEX commands using CONCURRENTLY. + * SendBareCommandListToRemoteMetadataNodes is a wrapper around + * SendBareCommandListToMetadataNodesInternal() that can be used to send + * bare commands to remote metadata nodes. + */ +void +SendBareCommandListToRemoteMetadataNodes(List *commandList) +{ + SendBareCommandListToMetadataNodesInternal(commandList, + REMOTE_METADATA_NODES); +} + + +/* + * SendBareCommandListToMetadataWorkers is a wrapper around + * SendBareCommandListToMetadataNodesInternal() that can be used to send + * bare commands to metadata workers. */ void SendBareCommandListToMetadataWorkers(List *commandList) { - TargetWorkerSet targetWorkerSet = NON_COORDINATOR_METADATA_NODES; + SendBareCommandListToMetadataNodesInternal(commandList, + NON_COORDINATOR_METADATA_NODES); +} + + +/* + * SendBareCommandListToMetadataNodesInternal sends a list of commands to the given + * target worker set in serial. Commands are committed immediately: new connections + * are always used and no transaction block is used (hence "bare"). The connections + * are made as the extension owner to ensure write access to the Citus metadata + * tables. Primarily useful for INDEX commands using CONCURRENTLY.
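+ * (CREATE INDEX CONCURRENTLY cannot run inside a transaction block, + * which is why these commands require fresh, autocommitting connections.)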
+ */ +static void +SendBareCommandListToMetadataNodesInternal(List *commandList, + TargetWorkerSet targetWorkerSet) +{ List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock); char *nodeUser = CurrentUserName(); diff --git a/src/backend/distributed/utils/citus_nodefuncs.c b/src/backend/distributed/utils/citus_nodefuncs.c index aee1ff48a..0998560fe 100644 --- a/src/backend/distributed/utils/citus_nodefuncs.c +++ b/src/backend/distributed/utils/citus_nodefuncs.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_type.h" #include "distributed/citus_nodes.h" diff --git a/src/backend/distributed/utils/citus_outfuncs.c b/src/backend/distributed/utils/citus_outfuncs.c index 9b4ac809c..751063789 100644 --- a/src/backend/distributed/utils/citus_outfuncs.c +++ b/src/backend/distributed/utils/citus_outfuncs.c @@ -18,7 +18,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/utils/citus_safe_lib.c b/src/backend/distributed/utils/citus_safe_lib.c index 82fa8f6f2..cbd06fc50 100644 --- a/src/backend/distributed/utils/citus_safe_lib.c +++ b/src/backend/distributed/utils/citus_safe_lib.c @@ -14,7 +14,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "safe_lib.h" diff --git a/src/backend/distributed/utils/enable_ssl.c b/src/backend/distributed/utils/enable_ssl.c index cac32f74c..35b1e0f1a 100644 --- a/src/backend/distributed/utils/enable_ssl.c +++ b/src/backend/distributed/utils/enable_ssl.c @@ -18,7 +18,7 @@ * it otherwise we get warnings about redefining this value. This needs to be * done before including libpq.h. 
*/ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/connection_management.h" #include "distributed/memutils.h" diff --git a/src/backend/distributed/utils/foreign_key_relationship.c b/src/backend/distributed/utils/foreign_key_relationship.c index d30c767df..d69d9044d 100644 --- a/src/backend/distributed/utils/foreign_key_relationship.c +++ b/src/backend/distributed/utils/foreign_key_relationship.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" diff --git a/src/backend/distributed/utils/log_utils.c b/src/backend/distributed/utils/log_utils.c index 59a090a16..7d808591b 100644 --- a/src/backend/distributed/utils/log_utils.c +++ b/src/backend/distributed/utils/log_utils.c @@ -9,7 +9,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "utils/guc.h" #include "distributed/log_utils.h" diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c index 22a0843bd..851335abe 100644 --- a/src/backend/distributed/utils/maintenanced.c +++ b/src/backend/distributed/utils/maintenanced.c @@ -16,7 +16,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index 924ba4c54..404d792f9 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -6,7 +6,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/heapam.h" diff --git a/src/backend/distributed/utils/task_execution_utils.c b/src/backend/distributed/utils/task_execution_utils.c index 50652b6bd..7251514b5 100644 --- a/src/backend/distributed/utils/task_execution_utils.c +++ b/src/backend/distributed/utils/task_execution_utils.c @@ -6,7 +6,7 @@ #include #include -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "common/hashfn.h" diff --git a/src/include/columnar/columnar_version_compat.h b/src/include/columnar/columnar_version_compat.h index 0e0ae3112..d9b29cdb0 100644 --- a/src/include/columnar/columnar_version_compat.h +++ b/src/include/columnar/columnar_version_compat.h @@ -12,7 +12,7 @@ #ifndef COLUMNAR_COMPAT_H #define COLUMNAR_COMPAT_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if PG_VERSION_NUM >= PG_VERSION_15 #define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \ diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index e4a624ddf..c72406dac 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -230,9 +230,6 @@ extern List * PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *que ProcessUtilityContext processUtilityContext); -extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool - isPostprocess); - extern List * PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); @@ -242,10 +239,10 @@ extern List * PreprocessCreateDatabaseStmt(Node *node, const char *queryString, extern List * PostprocessCreateDatabaseStmt(Node *node, const char *queryString); 
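The Preprocess/Postprocess hooks declared here back the CREATE/DROP DATABASE propagation that the regression tests later in this patch exercise. A minimal sketch of the user-visible flow on a coordinator (database and role names are illustrative; per the expected output below, only template1 is accepted as a template):

    SET citus.enable_create_database_propagation TO on;
    CREATE DATABASE mydatabase WITH TEMPLATE = template1 OWNER = some_user;
    -- TEMPLATE = 'template0' would fail with:
    --   ERROR: Only template1 is supported as template parameter for CREATE DATABASE
    DROP DATABASE mydatabase;  -- likewise removed from all nodes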
extern List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); -extern List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool - isPostprocess); -extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool - isPostprocess); +extern List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, + bool isPostprocess); +extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, + bool isPostprocess); extern List * GenerateCreateDatabaseCommandList(void); @@ -523,7 +520,6 @@ extern List * RenameRoleStmtObjectAddress(Node *stmt, bool missing_ok, bool extern void UnmarkRolesDistributed(List *roles); extern List * FilterDistributedRoles(List *roles); -extern void EnsureSequentialModeForRoleDDL(void); /* schema.c - forward declarations */ extern List * PostprocessCreateSchemaStmt(Node *node, const char *queryString); diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 1790eb468..caac002ed 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -10,7 +10,7 @@ #ifndef MULTI_UTILITY_H #define MULTI_UTILITY_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "postgres.h" @@ -94,7 +94,7 @@ extern void ProcessUtilityParseTree(Node *node, const char *queryString, extern void MarkInvalidateForeignKeyGraph(void); extern void InvalidateForeignKeyGraphForDDL(void); extern List * DDLTaskList(Oid relationId, const char *commandString); -extern List * NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands); +extern List * NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands); extern List * NodeDDLTaskList(TargetWorkerSet targets, List *commands); extern bool AlterTableInProgress(void); extern bool DropSchemaOrDBInProgress(void); diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 51e3c4f46..cf128590d 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -123,12 +123,12 @@ extern void AppendGrantSharedSuffix(StringInfo buf, GrantStmt *stmt); /* Common deparser utils */ -struct option_format +typedef struct DefElemOptionFormat { - const char *name; - const char *format; - const int type; -}; + char *name; + char *format; + int type; +} DefElemOptionFormat; typedef enum OptionFormatType { @@ -140,9 +140,9 @@ typedef enum OptionFormatType } OptionFormatType; -extern void optionToStatement(StringInfo buf, DefElem *option, const struct - option_format *opt_formats, int - opt_formats_len); +extern void DefElemOptionToStatement(StringInfo buf, DefElem *option, const + DefElemOptionFormat *opt_formats, int + opt_formats_len); /* forward declarations for deparse_statistics_stmts.c */ diff --git a/src/include/distributed/distributed_planner.h b/src/include/distributed/distributed_planner.h index d46fbf2e6..bc8f5bc94 100644 --- a/src/include/distributed/distributed_planner.h +++ b/src/include/distributed/distributed_planner.h @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "nodes/plannodes.h" diff --git a/src/include/distributed/hash_helpers.h b/src/include/distributed/hash_helpers.h index 2b16d110c..168879b4d 100644 --- a/src/include/distributed/hash_helpers.h +++ b/src/include/distributed/hash_helpers.h @@ -11,7 +11,7 @@ #include "postgres.h" -#include 
"distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "utils/hsearch.h" diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h index 86fada5f7..cf24a8c81 100644 --- a/src/include/distributed/metadata/distobject.h +++ b/src/include/distributed/metadata/distobject.h @@ -21,13 +21,12 @@ extern bool ObjectExists(const ObjectAddress *address); extern bool CitusExtensionObject(const ObjectAddress *objectAddress); extern bool IsAnyObjectDistributed(const List *addresses); -extern bool IsObjectDistributed(const ObjectAddress *address); extern bool ClusterHasDistributedFunctionWithDistArgument(void); extern void MarkObjectDistributed(const ObjectAddress *distAddress); extern void MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress); extern void MarkObjectDistributedLocally(const ObjectAddress *distAddress); extern void UnmarkObjectDistributed(const ObjectAddress *address); -extern void UnmarkRolesAndDatabaseDistributed(Node *node); +extern void UnmarkNodeWideObjectsDistributed(Node *node); extern bool IsTableOwnedByExtension(Oid relationId); extern bool ObjectAddressDependsOnExtension(const ObjectAddress *target); extern bool IsAnyObjectAddressOwnedByExtension(const List *targets, diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index 9234adc76..7e50a6af6 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -342,6 +342,7 @@ extern void LookupTaskPlacementHostAndPort(ShardPlacement *taskPlacement, char * int *nodePort); extern bool IsDummyPlacement(ShardPlacement *taskPlacement); extern StringInfo GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList, + Oid indexId, SizeQueryType sizeQueryType, bool optimizePartitionCalculations); extern List * RemoveCoordinatorPlacementIfNotSingleNode(List *placementList); diff --git a/src/include/distributed/multi_physical_planner.h b/src/include/distributed/multi_physical_planner.h index 35d83eb33..6bdc95cb3 100644 --- a/src/include/distributed/multi_physical_planner.h +++ b/src/include/distributed/multi_physical_planner.h @@ -16,7 +16,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" diff --git a/src/include/distributed/recursive_planning.h b/src/include/distributed/recursive_planning.h index a883047f6..87df7fba2 100644 --- a/src/include/distributed/recursive_planning.h +++ b/src/include/distributed/recursive_planning.h @@ -10,7 +10,7 @@ #ifndef RECURSIVE_PLANNING_H #define RECURSIVE_PLANNING_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" #include "distributed/relation_restriction_equivalence.h" diff --git a/src/include/distributed/relation_utils.h b/src/include/distributed/relation_utils.h index acf84a9da..d3a5ab105 100644 --- a/src/include/distributed/relation_utils.h +++ b/src/include/distributed/relation_utils.h @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if PG_VERSION_NUM >= PG_VERSION_16 #include "parser/parse_relation.h" #endif diff --git a/src/include/distributed/worker_manager.h b/src/include/distributed/worker_manager.h index 5ad7f4962..694d38ccf 100644 --- a/src/include/distributed/worker_manager.h +++ b/src/include/distributed/worker_manager.h @@ -87,6 +87,7 @@ extern WorkerNode * 
FindNodeWithNodeId(int nodeId, bool missingOk); extern WorkerNode * ModifiableWorkerNode(const char *nodeName, int32 nodePort); extern List * ReadDistNode(bool includeNodesFromOtherClusters); extern void EnsureCoordinator(void); +extern void EnsurePropagationToCoordinator(void); extern void EnsureCoordinatorIsInMetadata(void); extern void InsertCoordinatorIfClusterEmpty(void); extern uint32 GroupForNode(char *nodeName, int32 nodePort); diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h index 631940edf..d622fe366 100644 --- a/src/include/distributed/worker_transaction.h +++ b/src/include/distributed/worker_transaction.h @@ -29,11 +29,22 @@ typedef enum TargetWorkerSet */ NON_COORDINATOR_METADATA_NODES, + /* + * All the active primary nodes in the metadata which have metadata + * except the local node + */ + REMOTE_METADATA_NODES, + /* * All the active primary nodes in the metadata except the coordinator */ NON_COORDINATOR_NODES, + /* + * All the active primary nodes in the metadata except the local node + */ + REMOTE_NODES, + /* * All active primary nodes in the metadata */ @@ -74,6 +85,10 @@ extern bool SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(cons extern void SendCommandToWorkersWithMetadata(const char *command); extern void SendCommandToWorkersWithMetadataViaSuperUser(const char *command); extern void SendCommandListToWorkersWithMetadata(List *commands); +extern void SendCommandToRemoteNodesWithMetadata(const char *command); +extern void SendCommandToRemoteNodesWithMetadataViaSuperUser(const char *command); +extern void SendCommandListToRemoteNodesWithMetadata(List *commands); +extern void SendBareCommandListToRemoteMetadataNodes(List *commandList); extern void SendBareCommandListToMetadataWorkers(List *commandList); extern void EnsureNoModificationsHaveBeenDone(void); extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName, diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index 1bdbae580..4e874e2ee 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -11,7 +11,7 @@ #ifndef PG_VERSION_COMPAT_H #define PG_VERSION_COMPAT_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if PG_VERSION_NUM >= PG_VERSION_16 diff --git a/src/include/distributed/pg_version_constants.h b/src/include/pg_version_constants.h similarity index 100% rename from src/include/distributed/pg_version_constants.h rename to src/include/pg_version_constants.h diff --git a/src/test/regress/Pipfile.lock b/src/test/regress/Pipfile.lock index 15cb7ecda..bdb42a1c3 100644 --- a/src/test/regress/Pipfile.lock +++ b/src/test/regress/Pipfile.lock @@ -127,72 +127,61 @@ }, "cffi": { "hashes": [ - "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5", - "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef", - "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104", - "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426", - "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405", - "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375", - "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a", - "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e", - "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc", - 
"sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf", - "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185", - "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497", - "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3", - "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35", - "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c", - "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83", - "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21", - "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca", - "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984", - "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac", - "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd", - "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee", - "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a", - "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2", - "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192", - "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7", - "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585", - "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f", - "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e", - "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27", - "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b", - "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e", - "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e", - "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d", - "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c", - "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415", - "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82", - "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02", - "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314", - "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325", - "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c", - "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3", - "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914", - "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045", - "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d", - "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9", - "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5", - "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2", - "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c", - "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3", - "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2", - "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8", - "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d", - "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d", - "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9", - 
"sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162", - "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76", - "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4", - "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e", - "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9", - "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6", - "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b", - "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01", - "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0" + "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", + "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", + "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", + "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", + "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", + "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", + "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", + "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", + "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", + "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", + "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", + "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", + "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", + "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", + "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", + "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", + "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", + "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", + "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", + "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", + "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", + "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", + "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", + "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", + "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", + "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", + "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", + "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", + "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", + "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", + "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", + "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", + "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", + "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", + "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", + "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", + "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", + 
"sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", + "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", + "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", + "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", + "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", + "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", + "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", + "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", + "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", + "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", + "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", + "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", + "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", + "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", + "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" ], - "version": "==1.15.1" + "markers": "python_version >= '3.8'", + "version": "==1.16.0" }, "click": { "hashes": [ @@ -420,78 +409,78 @@ "mitmproxy": { "editable": true, "git": "https://github.com/citusdata/mitmproxy.git", - "markers": "python_version >= '3.10'", + "markers": "python_version >= '3.9'", "ref": "2fd18ef051b987925a36337ab1d61aa674353b44" }, "msgpack": { "hashes": [ - "sha256:00ce5f827d4f26fc094043e6f08b6069c1b148efa2631c47615ae14fb6cafc89", - "sha256:04450e4b5e1e662e7c86b6aafb7c230af9334fd0becf5e6b80459a507884241c", - "sha256:099c3d8a027367e1a6fc55d15336f04ff65c60c4f737b5739f7db4525c65fe9e", - "sha256:102cfb54eaefa73e8ca1e784b9352c623524185c98e057e519545131a56fb0af", - "sha256:14db7e1b7a7ed362b2f94897bf2486c899c8bb50f6e34b2db92fe534cdab306f", - "sha256:159cfec18a6e125dd4723e2b1de6f202b34b87c850fb9d509acfd054c01135e9", - "sha256:1dc67b40fe81217b308ab12651adba05e7300b3a2ccf84d6b35a878e308dd8d4", - "sha256:1f0e36a5fa7a182cde391a128a64f437657d2b9371dfa42eda3436245adccbf5", - "sha256:229ccb6713c8b941eaa5cf13dc7478eba117f21513b5893c35e44483e2f0c9c8", - "sha256:25d3746da40f3c8c59c3b1d001e49fd2aa17904438f980d9a391370366df001e", - "sha256:32c0aff31f33033f4961abc01f78497e5e07bac02a508632aef394b384d27428", - "sha256:33bbf47ea5a6ff20c23426106e81863cdbb5402de1825493026ce615039cc99d", - "sha256:35ad5aed9b52217d4cea739d0ea3a492a18dd86fecb4b132668a69f27fb0363b", - "sha256:3910211b0ab20be3a38e0bb944ed45bd4265d8d9f11a3d1674b95b298e08dd5c", - "sha256:3b5658b1f9e486a2eec4c0c688f213a90085b9cf2fec76ef08f98fdf6c62f4b9", - "sha256:40b801b768f5a765e33c68f30665d3c6ee1c8623a2d2bb78e6e59f2db4e4ceb7", - "sha256:47275ff73005a3e5e146e50baa2378e1730cba6e292f0222bc496a8e4c4adfc8", - "sha256:55bb4a1bf94e39447bc08238a2fb8a767460388a8192f67c103442eb36920887", - "sha256:5b08676a17e3f791daad34d5fcb18479e9c85e7200d5a17cbe8de798643a7e37", - "sha256:5b16344032a27b2ccfd341f89dadf3e4ef6407d91e4b93563c14644a8abb3ad7", - "sha256:5c5e05e4f5756758c58a8088aa10dc70d851c89f842b611fdccfc0581c1846bc", - "sha256:5cd67674db3c73026e0a2c729b909780e88bd9cbc8184256f9567640a5d299a8", - "sha256:5e7fae9ca93258a956551708cf60dc6c8145574e32ce8c8c4d894e63bcb04341", - "sha256:61213482b5a387ead9e250e9e3cb290292feca39dc83b41c3b1b7b8ffc8d8ecb", - "sha256:619a63753ba9e792fe3c6c0fc2b9ee2cfbd92153dd91bee029a89a71eb2942cd", - "sha256:652e4b7497825b0af6259e2c54700e6dc33d2fc4ed92b8839435090d4c9cc911", - 
"sha256:68569509dd015fcdd1e6b2b3ccc8c51fd27d9a97f461ccc909270e220ee09685", - "sha256:6a01a072b2219b65a6ff74df208f20b2cac9401c60adb676ee34e53b4c651077", - "sha256:70843788c85ca385846a2d2f836efebe7bb2687ca0734648bf5c9dc6c55602d2", - "sha256:76820f2ece3b0a7c948bbb6a599020e29574626d23a649476def023cbb026787", - "sha256:7a006c300e82402c0c8f1ded11352a3ba2a61b87e7abb3054c845af2ca8d553c", - "sha256:7baf16fd8908a025c4a8d7b699103e72d41f967e2aee5a2065432bcdbd9fd06e", - "sha256:7ecf431786019a7bfedc28281531d706627f603e3691d64eccdbce3ecd353823", - "sha256:885de1ed5ea01c1bfe0a34c901152a264c3c1f8f1d382042b92ea354bd14bb0e", - "sha256:88cdb1da7fdb121dbb3116910722f5acab4d6e8bfcacab8fafe27e2e7744dc6a", - "sha256:95ade0bd4cf69e04e8b8f8ec2d197d9c9c4a9b6902e048dc7456bf6d82e12a80", - "sha256:9b88dc97ba86c96b964c3745a445d9a65f76fe21955a953064fe04adb63e9367", - "sha256:9c780d992f5d734432726b92a0c87bf1857c3d85082a8dea29cbf56e44a132b3", - "sha256:9f85200ea102276afdd3749ca94747f057bbb868d1c52921ee2446730b508d0f", - "sha256:a1cf98afa7ad5e7012454ca3fde254499a13f9d92fd50cb46118118a249a1355", - "sha256:a635aecf1047255576dbb0927cbf9a7aa4a68e9d54110cc3c926652d18f144e0", - "sha256:ae97504958d0bc58c1152045c170815d5c4f8af906561ce044b6358b43d0c97e", - "sha256:b06a5095a79384760625b5de3f83f40b3053a385fb893be8a106fbbd84c14980", - "sha256:b5c8dd9a386a66e50bd7fa22b7a49fb8ead2b3574d6bd69eb1caced6caea0803", - "sha256:bae6c561f11b444b258b1b4be2bdd1e1cf93cd1d80766b7e869a79db4543a8a8", - "sha256:bbb4448a05d261fae423d5c0b0974ad899f60825bc77eabad5a0c518e78448c2", - "sha256:bd6af61388be65a8701f5787362cb54adae20007e0cc67ca9221a4b95115583b", - "sha256:bf652839d16de91fe1cfb253e0a88db9a548796939533894e07f45d4bdf90a5f", - "sha256:d6d25b8a5c70e2334ed61a8da4c11cd9b97c6fbd980c406033f06e4463fda006", - "sha256:da057d3652e698b00746e47f06dbb513314f847421e857e32e1dc61c46f6c052", - "sha256:e0ed35d6d6122d0baa9a1b59ebca4ee302139f4cfb57dab85e4c73ab793ae7ed", - "sha256:e36560d001d4ba469d469b02037f2dd404421fd72277d9474efe9f03f83fced5", - "sha256:f4321692e7f299277e55f322329b2c972d93bb612d85f3fda8741bec5c6285ce", - "sha256:f75114c05ec56566da6b55122791cf5bb53d5aada96a98c016d6231e03132f76", - "sha256:fb4571efe86545b772a4630fee578c213c91cbcfd20347806e47fd4e782a18fe", - "sha256:fc97aa4b4fb928ff4d3b74da7c30b360d0cb3ede49a5a6e1fd9705f49aea1deb" + "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862", + "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d", + "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3", + "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672", + "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0", + "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9", + "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee", + "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46", + "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524", + "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819", + "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc", + "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc", + "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1", + "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82", + "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81", + "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6", + 
"sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d", + "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2", + "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c", + "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87", + "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84", + "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e", + "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95", + "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f", + "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b", + "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93", + "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf", + "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61", + "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c", + "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8", + "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d", + "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c", + "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4", + "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba", + "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415", + "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee", + "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d", + "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9", + "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075", + "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f", + "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7", + "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681", + "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329", + "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1", + "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf", + "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c", + "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5", + "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b", + "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5", + "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e", + "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b", + "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad", + "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd", + "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7", + "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002", + "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc" ], "markers": "python_version >= '3.8'", - "version": "==1.0.6" + "version": "==1.0.7" }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], "markers": "python_version >= '3.7'", - "version": 
"==23.1" + "version": "==23.2" }, "passlib": { "hashes": [ @@ -698,6 +687,62 @@ "markers": "python_version >= '3'", "version": "==0.17.16" }, + "ruamel.yaml.clib": { + "hashes": [ + "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d", + "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", + "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", + "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", + "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", + "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", + "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", + "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", + "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", + "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", + "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", + "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", + "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", + "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", + "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", + "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", + "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", + "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", + "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", + "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", + "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", + "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", + "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", + "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", + "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", + "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", + "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", + "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", + "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", + "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", + "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", + "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb", + "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942", + "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", + "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", + "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", + "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", + 
"sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", + "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", + "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", + "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1", + "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2", + "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875", + "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412" + ], + "markers": "python_version < '3.10' and platform_python_implementation == 'CPython'", + "version": "==0.2.8" + }, "sortedcontainers": { "hashes": [ "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", @@ -746,11 +791,12 @@ }, "werkzeug": { "hashes": [ - "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", - "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" + "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", + "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" ], + "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==2.3.7" + "version": "==3.0.1" }, "wsproto": { "hashes": [ @@ -906,11 +952,11 @@ }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], "markers": "python_version >= '3.7'", - "version": "==23.1" + "version": "==23.2" }, "pathspec": { "hashes": [ @@ -922,19 +968,19 @@ }, "platformdirs": { "hashes": [ - "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d", - "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d" + "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", + "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" ], "markers": "python_version >= '3.7'", - "version": "==3.10.0" + "version": "==3.11.0" }, "pycodestyle": { "hashes": [ - "sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0", - "sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8" + "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f", + "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67" ], "markers": "python_version >= '3.8'", - "version": "==2.11.0" + "version": "==2.11.1" }, "pyflakes": { "hashes": [ diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py index 53c9c7944..40c727189 100644 --- a/src/test/regress/citus_tests/common.py +++ b/src/test/regress/citus_tests/common.py @@ -581,6 +581,14 @@ class QueryRunner(ABC): with self.cur(**kwargs) as cur: cur.execute(query, params=params) + def sql_prepared(self, query, params=None, **kwargs): + """Run an SQL query, with prepare=True + + This opens a new connection and closes it once the query is done + """ + with self.cur(**kwargs) as cur: + cur.execute(query, params=params, prepare=True) + def sql_row(self, query, params=None, allow_empty_result=False, **kwargs): """Run an SQL query that returns a single row and returns this row diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index 90c16b04e..b902a7998 100755 --- a/src/test/regress/citus_tests/run_test.py +++ 
b/src/test/regress/citus_tests/run_test.py @@ -125,7 +125,6 @@ DEPS = { "multi_mx_create_table": TestDeps( None, [ - "multi_test_helpers_superuser", "multi_mx_node_metadata", "multi_cluster_management", "multi_mx_function_table_reference", @@ -151,8 +150,6 @@ DEPS = { ], worker_count=6, ), - "create_drop_database_propagation": TestDeps("minimal_schedule"), - "create_drop_database_propagation_pg15": TestDeps("minimal_schedule"), "function_propagation": TestDeps("minimal_schedule"), "citus_shards": TestDeps("minimal_schedule"), "grant_on_foreign_server_propagation": TestDeps("minimal_schedule"), @@ -177,6 +174,43 @@ DEPS = { ), "grant_on_schema_propagation": TestDeps("minimal_schedule"), "propagate_extension_commands": TestDeps("minimal_schedule"), + "multi_size_queries": TestDeps("base_schedule", ["multi_copy"]), + "multi_mx_node_metadata": TestDeps( + None, + [ + "multi_extension", + "multi_test_helpers", + "multi_test_helpers_superuser", + ], + ), + "multi_mx_function_table_reference": TestDeps( + None, + [ + "multi_cluster_management", + "remove_coordinator_from_metadata", + ], + # because it queries the node group id, which changes as we add / remove nodes + repeatable=False, + ), + "multi_mx_add_coordinator": TestDeps( + None, + [ + "multi_cluster_management", + "remove_coordinator_from_metadata", + "multi_mx_function_table_reference", + ], + ), + "metadata_sync_helpers": TestDeps( + None, + [ + "multi_mx_node_metadata", + "multi_cluster_management", + ], + ), + "multi_utilities": TestDeps( + "minimal_schedule", + ["multi_data_types"], + ), } diff --git a/src/test/regress/citus_tests/test/test_prepared_statements.py new file mode 100644 index 000000000..761ecc30c --- /dev/null +++ b/src/test/regress/citus_tests/test/test_prepared_statements.py @@ -0,0 +1,30 @@ +def test_call_param(cluster): + # create a distributed table and an associated distributed procedure + # to ensure a parameterized CALL succeeds, even when the param is the + # distribution key.
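+ # Note: SQL-level PREPARE only supports SELECT, INSERT, UPDATE, DELETE, + # MERGE and VALUES statements, so a CALL can only be prepared at the + # protocol level; that is what sql_prepared (psycopg's prepare=True) + # exercises below.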
+ coord = cluster.coordinator + coord.sql("CREATE TABLE test(i int)") + coord.sql( + """ + CREATE PROCEDURE p(_i INT) LANGUAGE plpgsql AS $$ + BEGIN + INSERT INTO test(i) VALUES (_i); + END; $$ + """ + ) + sql = "CALL p(%s)" + + # prepare/exec before distributing + coord.sql_prepared(sql, (1,)) + + coord.sql("SELECT create_distributed_table('test', 'i')") + coord.sql( + "SELECT create_distributed_function('p(int)', distribution_arg_name := '_i', colocate_with := 'test')" + ) + + # prepare/exec after distribution + coord.sql_prepared(sql, (2,)) + + sum_i = coord.sql_value("select sum(i) from test;") + + assert sum_i == 3 diff --git a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out index e2685c2d7..a559ec442 100644 --- a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out +++ b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out @@ -107,6 +107,12 @@ SELECT pg_catalog.citus_split_shard_by_split_points( (1 row) +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + \c - - - :worker_2_port SET search_path TO "citus_split_test_schema"; -- Replication slots should be cleaned up diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index 9489664eb..c5ab0e2df 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -12,8 +12,19 @@ CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace \c - - - :master_port create user create_drop_db_test_user; set citus.enable_create_database_propagation=on; +-- Tests for create database propagation with template0 which should fail CREATE DATABASE mydatabase WITH OWNER = create_drop_db_test_user + TEMPLATE = 'template0' + ENCODING = 'UTF8' + CONNECTION LIMIT = 10 + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false; +ERROR: Only template1 is supported as template parameter for CREATE DATABASE +CREATE DATABASE mydatabase + WITH template=template1 + OWNER = create_drop_db_test_user ENCODING = 'UTF8' CONNECTION LIMIT = 10 TABLESPACE = create_drop_db_tablespace @@ -239,35 +250,6 @@ NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx ---test for unsupported options -CREATE DATABASE mydatabase - with CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - LC_CTYPE = 'C.UTF-8' - ALLOW_CONNECTIONS = false - IS_TEMPLATE = false; -ERROR: CREATE DATABASE option "lc_ctype" is not supported -CREATE DATABASE mydatabase - with CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - LC_CTYPE = 'C.UTF-8' - ALLOW_CONNECTIONS = false - IS_TEMPLATE = false; -ERROR: CREATE DATABASE option "lc_ctype" is not supported -CREATE DATABASE mydatabase - with CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - LC_COLLATE = 'C.UTF-8' - ALLOW_CONNECTIONS = false - IS_TEMPLATE = false; -ERROR: CREATE DATABASE option "lc_collate" is not supported -CREATE DATABASE mydatabase - with CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - LOCALE = 'C.UTF-8' - ALLOW_CONNECTIONS = false - IS_TEMPLATE = false; -ERROR: CREATE DATABASE option "locale" is not supported --clean up resources created by 
this test drop tablespace create_drop_db_tablespace; \c - - - :worker_1_port diff --git a/src/test/regress/expected/create_role_propagation.out b/src/test/regress/expected/create_role_propagation.out index 59f7948a1..48310bdc3 100644 --- a/src/test/regress/expected/create_role_propagation.out +++ b/src/test/regress/expected/create_role_propagation.out @@ -40,18 +40,10 @@ SELECT master_remove_node('localhost', :worker_2_port); CREATE ROLE create_role_with_everything SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 105 PASSWORD 'strong_password123^' VALID UNTIL '2045-05-05 00:00:00.00+00' IN ROLE create_role, create_group ROLE create_user, create_group_2 ADMIN create_role_2, create_user_2; CREATE ROLE create_role_with_nothing NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 3 PASSWORD 'weakpassword' VALID UNTIL '2015-05-05 00:00:00.00+00'; --- show that creating role from worker node is only allowed when create role --- propagation is off +-- show that creating role from worker node is allowed \c - - - :worker_1_port CREATE ROLE role_on_worker; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -BEGIN; -SET citus.enable_create_role_propagation TO off; -CREATE ROLE role_on_worker; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -ROLLBACK; +DROP ROLE role_on_worker; \c - - - :master_port -- edge case role names CREATE ROLE "create_role'edge"; @@ -217,17 +209,17 @@ CREATE ROLE dist_role_3; CREATE ROLE dist_role_4; SET citus.enable_create_role_propagation TO OFF; CREATE ROLE non_dist_role_1 SUPERUSER; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE non_dist_role_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE non_dist_role_3; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE non_dist_role_4; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. SET citus.enable_create_role_propagation TO ON; SET ROLE dist_role_1; GRANT non_dist_role_1 TO non_dist_role_2; @@ -307,11 +299,11 @@ CREATE ROLE dist_mixed_3; CREATE ROLE dist_mixed_4; SET citus.enable_create_role_propagation TO OFF; CREATE ROLE nondist_mixed_1; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
+NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE nondist_mixed_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; role | member | grantor | admin_option --------------------------------------------------------------------- @@ -506,14 +498,14 @@ SELECT rolname, rolcanlogin FROM pg_authid WHERE rolname = 'create_role' OR roln -- test cascading grants SET citus.enable_create_role_propagation TO OFF; CREATE ROLE nondist_cascade_1; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE nondist_cascade_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE nondist_cascade_3; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. SET citus.enable_create_role_propagation TO ON; CREATE ROLE dist_cascade; GRANT nondist_cascade_1 TO nondist_cascade_2; @@ -696,3 +688,4 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; (0 rows) \c - - - :master_port +DROP ROLE nondist_cascade_1, nondist_cascade_2, nondist_cascade_3, dist_cascade; diff --git a/src/test/regress/expected/distributed_domain.out b/src/test/regress/expected/distributed_domain.out index 30e388803..6fdb348eb 100644 --- a/src/test/regress/expected/distributed_domain.out +++ b/src/test/regress/expected/distributed_domain.out @@ -680,16 +680,7 @@ SELECT * FROM use_age_invalid ORDER BY 1; -- verify we can validate a constraint that is already validated, can happen when we add a node while a domain constraint was not validated ALTER DOMAIN age_invalid VALIDATE CONSTRAINT check_age_positive; -- test changing the owner of a domain -SET client_min_messages TO error; -SELECT 1 FROM run_command_on_workers($$ CREATE ROLE domain_owner; $$); - ?column? 
---------------------------------------------------------------------- - 1 - 1 -(2 rows) - CREATE ROLE domain_owner; -RESET client_min_messages; CREATE DOMAIN alter_domain_owner AS int; ALTER DOMAIN alter_domain_owner OWNER TO domain_owner; SELECT u.rolname diff --git a/src/test/regress/expected/failure_create_index_concurrently.out b/src/test/regress/expected/failure_create_index_concurrently.out index 94d0f373d..784c91aec 100644 --- a/src/test/regress/expected/failure_create_index_concurrently.out +++ b/src/test/regress/expected/failure_create_index_concurrently.out @@ -28,7 +28,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then reattempt the original command. +if applicable, and then re-attempt the original command. ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -62,7 +62,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then reattempt the original command. +if applicable, and then re-attempt the original command. ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -90,7 +90,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid( CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then reattempt the original command. +if applicable, and then re-attempt the original command. ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -116,7 +116,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid( CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then reattempt the original command. +if applicable, and then re-attempt the original command. ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -143,7 +143,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP INDEX CONCURRENTLY").kill()'); DROP INDEX CONCURRENTLY IF EXISTS idx_index_test; WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then reattempt the original command. 
+if applicable, and then re-attempt the original command. ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -171,7 +171,7 @@ INSERT INTO index_test_2 VALUES (1, 1), (1, 2); CREATE UNIQUE INDEX CONCURRENTLY index_test_2_a_idx ON index_test_2(a); WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then reattempt the original command. +if applicable, and then re-attempt the original command. ERROR: could not create unique index "index_test_2_a_idx_1880019" DETAIL: Key (a)=(1) is duplicated. CONTEXT: while executing command on localhost:xxxxx diff --git a/src/test/regress/expected/failure_distributed_results.out b/src/test/regress/expected/failure_distributed_results.out index fc97c9af6..a316763e3 100644 --- a/src/test/regress/expected/failure_distributed_results.out +++ b/src/test/regress/expected/failure_distributed_results.out @@ -14,6 +14,8 @@ SELECT citus.mitmproxy('conn.allow()'); (1 row) SET citus.next_shard_id TO 100800; +-- Needed because of issue #7306 +SET citus.force_max_query_parallelization TO true; -- always try the 1st replica before the 2nd replica. SET citus.task_assignment_policy TO 'first-replica'; -- diff --git a/src/test/regress/expected/failure_split_cleanup.out b/src/test/regress/expected/failure_split_cleanup.out index fe646587c..d81335325 100644 --- a/src/test/regress/expected/failure_split_cleanup.out +++ b/src/test/regress/expected/failure_split_cleanup.out @@ -277,12 +277,12 @@ CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0 - 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 1 | 1 + 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx | 2 | 0 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx | 2 | 0 @@ -336,7 +336,7 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) @@ -388,7 +388,7 @@ CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where 
operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0 @@ -455,7 +455,7 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) @@ -507,7 +507,7 @@ CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0 @@ -574,7 +574,7 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) @@ -634,7 +634,7 @@ WARNING: connection to the remote node localhost:xxxxx failed with the followin ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 1 | 1 @@ -701,7 +701,7 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/global_cancel.out b/src/test/regress/expected/global_cancel.out index 5adeef3c8..e5ce4fbc6 100644 --- a/src/test/regress/expected/global_cancel.out +++ b/src/test/regress/expected/global_cancel.out @@ -9,9 +9,14 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); RESET client_min_messages; -- Kill maintenance daemon so it gets restarted and gets a gpid containing our -- nodeid -SELECT pg_terminate_backend(pid) +SELECT COUNT(pg_terminate_backend(pid)) >= 0 FROM pg_stat_activity -WHERE application_name = 'Citus Maintenance Daemon' \gset +WHERE application_name = 'Citus Maintenance Daemon'; + ?column? 
+--------------------------------------------------------------------- + t +(1 row) + -- reconnect to make sure we get a session with the gpid containing our nodeid \c - - - - CREATE SCHEMA global_cancel; @@ -77,6 +82,7 @@ ERROR: must be a superuser to terminate superuser process SELECT pg_cancel_backend(citus_backend_gpid()); ERROR: canceling statement due to user request \c - postgres - :master_port +DROP USER global_cancel_user; SET client_min_messages TO DEBUG; -- 10000000000 is the node id multiplier for global pid SELECT pg_cancel_backend(10000000000 * citus_coordinator_nodeid() + 0); diff --git a/src/test/regress/expected/insert_select_connection_leak.out b/src/test/regress/expected/insert_select_connection_leak.out index 8a983acd5..b342ecde1 100644 --- a/src/test/regress/expected/insert_select_connection_leak.out +++ b/src/test/regress/expected/insert_select_connection_leak.out @@ -47,16 +47,16 @@ INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 (1 row) END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 @@ -67,8 +67,8 @@ BEGIN; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; ROLLBACK; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 @@ -84,16 +84,16 @@ SAVEPOINT s1; INSERT INTO target_table SELECT a, CASE WHEN a < 50 THEN b ELSE null END FROM source_table; ERROR: null value in column "b" violates not-null constraint ROLLBACK TO SAVEPOINT s1; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS 
leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 (1 row) END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 diff --git a/src/test/regress/expected/isolation_drop_vs_all.out b/src/test/regress/expected/isolation_drop_vs_all.out index 7dab13615..2c8912c21 100644 --- a/src/test/regress/expected/isolation_drop_vs_all.out +++ b/src/test/regress/expected/isolation_drop_vs_all.out @@ -226,7 +226,7 @@ step s1-drop: DROP TABLE drop_hash; step s2-table-size: SELECT citus_total_relation_size('drop_hash'); step s1-commit: COMMIT; step s2-table-size: <... completed> -ERROR: could not compute table size: relation does not exist +ERROR: could not compute relation size: relation does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out index a9739a826..73610a455 100644 --- a/src/test/regress/expected/isolation_get_all_active_transactions.out +++ b/src/test/regress/expected/isolation_get_all_active_transactions.out @@ -94,7 +94,7 @@ step s2-commit: COMMIT; -starting permutation: s4-record-pid s3-show-activity s5-kill s3-show-activity +starting permutation: s4-record-pid s3-show-activity s5-kill s3-wait-backend-termination step s4-record-pid: SELECT pg_backend_pid() INTO selected_pid; @@ -115,12 +115,22 @@ pg_terminate_backend t (1 row) -step s3-show-activity: +step s3-wait-backend-termination: SET ROLE postgres; - select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid); - -count ---------------------------------------------------------------------- - 0 -(1 row) + DO $$ + DECLARE + i int; + BEGIN + i := 0; + -- try for 5 sec then timeout + WHILE (select count(*) > 0 from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid)) + LOOP + PERFORM pg_sleep(0.1); + i := i + 1; + IF i > 50 THEN + RAISE EXCEPTION 'Timeout while waiting for backend to terminate'; + END IF; + END LOOP; + END; + $$; diff --git a/src/test/regress/expected/isolation_master_update_node_1.out b/src/test/regress/expected/isolation_master_update_node_1.out new file mode 100644 index 000000000..474956629 --- /dev/null +++ b/src/test/regress/expected/isolation_master_update_node_1.out @@ -0,0 +1,68 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: BEGIN; +step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); +step s2-begin: BEGIN; +step s2-update-node-1: + -- update a specific node 
by address + SELECT master_update_node(nodeid, 'localhost', nodeport + 10) + FROM pg_dist_node + WHERE nodename = 'localhost' + AND nodeport = 57637; + +step s1-abort: ABORT; +step s2-update-node-1: <... completed> +master_update_node +--------------------------------------------------------------------- + +(1 row) + +step s2-abort: ABORT; +master_remove_node +--------------------------------------------------------------------- + + +(2 rows) + + +starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: BEGIN; +step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); +step s2-begin: BEGIN; +step s2-update-node-1-force: + -- update a specific node by address (force) + SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100) + FROM pg_dist_node + WHERE nodename = 'localhost' + AND nodeport = 57637; + +step s2-update-node-1-force: <... completed> +master_update_node +--------------------------------------------------------------------- + +(1 row) + +step s2-abort: ABORT; +step s1-abort: ABORT; +FATAL: terminating connection due to administrator command +FATAL: terminating connection due to administrator command +SSL connection has been closed unexpectedly +server closed the connection unexpectedly + +master_remove_node +--------------------------------------------------------------------- + + +(2 rows) + diff --git a/src/test/regress/expected/issue_5763.out b/src/test/regress/expected/issue_5763.out index aa6c4f35b..864297397 100644 --- a/src/test/regress/expected/issue_5763.out +++ b/src/test/regress/expected/issue_5763.out @@ -28,8 +28,8 @@ DROP USER issue_5763_3; -- test non-distributed role SET citus.enable_create_role_propagation TO off; CREATE USER issue_5763_4 WITH SUPERUSER; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
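The NOTICE/HINT rewording above ("worker nodes" becomes "other nodes") reflects that role commands can now originate on any node, not just the coordinator. A minimal sketch of the GUC involved, using hypothetical role names:

SET citus.enable_create_role_propagation TO off;
CREATE ROLE sketch_role; -- NOTICE: not propagating CREATE ROLE/USER commands to other nodes
RESET citus.enable_create_role_propagation;
CREATE ROLE sketch_role_2; -- created on every node in the Citus metadata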
\c - issue_5763_4 - :master_port set citus.enable_ddl_propagation = off; CREATE SCHEMA issue_5763_sc_4; diff --git a/src/test/regress/expected/logical_replication.out b/src/test/regress/expected/logical_replication.out index 8a3e96da9..b5a36125a 100644 --- a/src/test/regress/expected/logical_replication.out +++ b/src/test/regress/expected/logical_replication.out @@ -32,23 +32,21 @@ CREATE SUBSCRIPTION citus_shard_move_subscription_:postgres_oid PUBLICATION citus_shard_move_publication_:postgres_oid WITH (enabled=false, slot_name=citus_shard_move_slot_:postgres_oid); NOTICE: created replication slot "citus_shard_move_slot_10" on publisher -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 1 + citus_shard_move_subscription_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) SELECT count(*) FROM dist; count @@ -58,22 +56,21 @@ SELECT count(*) FROM dist; \c - - - :worker_1_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 0 +(0 rows) + +SELECT pubname from pg_publication; + pubname +--------------------------------------------------------------------- + citus_shard_move_publication_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 1 -(1 row) - -SELECT count(*) from pg_replication_slots; - count ---------------------------------------------------------------------- - 1 + citus_shard_move_slot_10 (1 row) SELECT count(*) FROM dist; @@ -90,25 +87,29 @@ select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localho (1 row) +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + -- the subscription is still there, as there is no cleanup record for it -- we have created it manually -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 1 + citus_shard_move_subscription_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) SELECT count(*) from dist; count @@ -120,22 +121,21 @@ SELECT count(*) from dist; SET search_path TO logical_replication; -- the publication and repslot are still there, as there are no cleanup records for them -- we have created them manually -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 0 +(0 rows) + +SELECT pubname from pg_publication; + pubname 
+--------------------------------------------------------------------- + citus_shard_move_publication_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 1 -(1 row) - -SELECT count(*) from pg_replication_slots; - count ---------------------------------------------------------------------- - 1 + citus_shard_move_slot_10 (1 row) SELECT count(*) from dist; @@ -153,23 +153,20 @@ SELECT pg_drop_replication_slot('citus_shard_move_slot_' || :postgres_oid); \c - - - :worker_2_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) SELECT count(*) from dist; count diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 29d62c46a..a41ac9d5f 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -27,8 +27,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; --- in a distributed transaction and the application name is Citus --- but we are on the coordinator, so still not allowed +-- in a distributed transaction and the application name is Citus, allowed. 
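Here "the application name is Citus" means the backend advertises itself as an internal Citus session through application_name, using the gpid-tagged form. A condensed sketch of the gate, with the values taken from the hunk that follows:

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus_internal gpid=10000000001';
-- citus_internal_* helpers such as citus_internal_add_partition_metadata() now pass the check
ROLLBACK;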
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id @@ -38,7 +37,11 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); -ERROR: This is an internal Citus function can only be used in a distributed transaction + citus_internal_add_partition_metadata +--------------------------------------------------------------------- + +(1 row) + ROLLBACK; \c - postgres - \c - - - :worker_1_port diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index e58b02937..3eb549ab5 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -90,7 +90,7 @@ SELECT citus_disable_node('localhost', :worker_2_port); (1 row) -SELECT public.wait_until_metadata_sync(60000); +SELECT public.wait_until_metadata_sync(20000); wait_until_metadata_sync --------------------------------------------------------------------- @@ -812,7 +812,7 @@ SELECT citus_disable_node('localhost', 9999); (1 row) -SELECT public.wait_until_metadata_sync(60000); +SELECT public.wait_until_metadata_sync(20000); wait_until_metadata_sync --------------------------------------------------------------------- @@ -1258,3 +1258,9 @@ SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHER t (1 row) +-- Grant all on public schema to public +-- +-- That's the default on Postgres versions < 15 and we want to +-- keep permissions compatible across versions, in regression +-- tests. +GRANT ALL ON SCHEMA public TO PUBLIC; diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index e810b715e..42bcd6647 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -70,38 +70,43 @@ SELECT create_reference_table('ref'); (1 row) \c - - - :worker_1_port --- alter role from mx worker isn't allowed when alter role propagation is on -SET citus.enable_alter_role_propagation TO ON; -ALTER ROLE reprefuser WITH CREATEROLE; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again.
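The removed lines above and the hunk below capture the behavior flip for ALTER ROLE issued on an MX worker. A sketch of both modes, using a hypothetical role:

SET citus.enable_alter_role_propagation TO OFF;
ALTER ROLE sketch_user WITH CREATEROLE; -- applies only on the local node
SET citus.enable_alter_role_propagation TO ON;
ALTER ROLE sketch_user WITH CREATEROLE; -- now propagated to the coordinator and all workers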
--- to alter role locally disable alter role propagation first +-- to alter role locally, disable alter role propagation first SET citus.enable_alter_role_propagation TO OFF; ALTER ROLE reprefuser WITH CREATEROLE; -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; - rolcreatedb | rolcreaterole +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; + result --------------------------------------------------------------------- - t | t -(1 row) + {"rolcreatedb": true, "rolcreaterole": false} + {"rolcreatedb": true, "rolcreaterole": false} + {"rolcreatedb": true, "rolcreaterole": true} +(3 rows) -RESET citus.enable_alter_role_propagation; -\c - - - :worker_2_port --- show that altering role locally on worker doesn't propagated to other worker -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; - rolcreatedb | rolcreaterole +-- alter role from mx worker is allowed +SET citus.enable_alter_role_propagation TO ON; +ALTER ROLE reprefuser WITH CREATEROLE; +-- show that altering role locally on worker is propagated to coordinator and to other workers too +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; + result --------------------------------------------------------------------- - t | f -(1 row) + {"rolcreatedb": true, "rolcreaterole": true} + {"rolcreatedb": true, "rolcreaterole": true} + {"rolcreatedb": true, "rolcreaterole": true} +(3 rows) \c - - - :master_port SET search_path TO mx_add_coordinator,public; --- show that altering role locally on worker doesn't propagated to coordinator -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; - rolcreatedb | rolcreaterole ---------------------------------------------------------------------- - t | f -(1 row) - SET citus.log_local_commands TO ON; SET client_min_messages TO DEBUG; -- if the placement policy is not round-robin, SELECTs on the reference @@ -124,7 +129,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM mx_add_coo 0 (1 row) --- test that distributed functions also use local execution +-- test that distributed functions also use sequential execution CREATE OR REPLACE FUNCTION my_group_id() RETURNS void LANGUAGE plpgsql @@ -365,5 +370,6 @@ SELECT verify_metadata('localhost', :worker_1_port), SET client_min_messages TO error; DROP SCHEMA mx_add_coordinator CASCADE; +DROP USER reprefuser; SET search_path TO DEFAULT; RESET client_min_messages; diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index ac7f90826..b9d3f7faa 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -3,6 +3,7 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000; +SET client_min_messages TO WARNING; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -15,6 +16,9 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port); (1 row) +-- cannot drop them at the end of the test file as other tests depend on them +DROP SCHEMA IF EXISTS citus_mx_test_schema, 
citus_mx_test_schema_join_1, citus_mx_test_schema_join_2 CASCADE; +DROP TABLE IF EXISTS nation_hash, lineitem_mx, orders_mx, customer_mx, nation_mx, part_mx, supplier_mx, mx_ddl_table, limit_orders_mx, multiple_hash_mx, app_analytics_events_mx, researchers_mx, labs_mx, objects_mx, articles_hash_mx, articles_single_shard_hash_mx, company_employees_mx; -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; @@ -42,7 +46,7 @@ BEGIN END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) +CREATE OR REPLACE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, @@ -65,14 +69,16 @@ SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset \endif CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale); CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); -CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); +CREATE TYPE citus_mx_test_schema.order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 1 \c - - - :worker_1_port +SET client_min_messages TO WARNING; -- show that we do not support creating citus local tables from mx workers for now CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. +DROP TABLE citus_local_table; SET search_path TO citus_mx_test_schema; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( @@ -85,6 +91,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( ); -- now create required stuff in the worker 2 \c - - - :worker_2_port +SET client_min_messages TO WARNING; SET search_path TO citus_mx_test_schema; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( @@ -97,6 +104,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( ); -- connect back to the master, and do some more tests \c - - - :master_port +SET client_min_messages TO WARNING; SET citus.shard_replication_factor TO 1; SET search_path TO public; CREATE TABLE nation_hash( @@ -315,7 +323,7 @@ CREATE TABLE limit_orders_mx ( symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, - kind order_side_mx NOT NULL, + kind citus_mx_test_schema.order_side_mx NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) ); SET citus.shard_count TO 2; @@ -473,6 +481,7 @@ ORDER BY table_name::text; (23 rows) \c - - - :worker_1_port +SET client_min_messages TO WARNING; SELECT table_name, citus_table_type, distribution_column, shard_count, table_owner FROM citus_tables ORDER BY table_name::text; @@ -978,6 +987,6 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR (469 rows) -- Show that altering type name is not supported from worker node -ALTER TYPE order_side_mx RENAME TO temp_order_side_mx; +ALTER TYPE citus_mx_test_schema.order_side_mx RENAME TO temp_order_side_mx; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
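The two repartition diffs that follow silence a timing-dependent NOTICE rather than assert on it: whether a "SELECT bytes FROM fetch_intermediate_results(...)" command gets logged depends on task scheduling. The fix narrows command logging with a pattern; a sketch of the technique as the test applies it:

BEGIN;
SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%';
-- only commands matching the pattern are logged, so the flaky line never appears
insert into target_table SELECT a*2 FROM source_table RETURNING a;
COMMIT;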
diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition.out b/src/test/regress/expected/multi_mx_insert_select_repartition.out index 62f197c30..a3912ec8e 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out index 15deba0c0..62271f9a7 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index 707dcc472..6a152b515 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -9,7 +9,7 @@ SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 1; \set VERBOSITY terse -- Simulates a readonly node by setting default_transaction_read_only. 
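mark_node_readonly() (redefined below with CREATE OR REPLACE so reruns don't fail) simulates a read-only node. A rough sketch of the idea, assuming run_command_on_workers() reaches the node in question; the helper itself targets a single host and port:

SELECT result FROM run_command_on_workers(
  $$ ALTER SYSTEM SET default_transaction_read_only TO on $$);
SELECT result FROM run_command_on_workers($$ SELECT pg_reload_conf() $$);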
-CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) +CREATE OR REPLACE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) RETURNS TEXT LANGUAGE sql AS $$ @@ -27,7 +27,7 @@ CREATE OR REPLACE FUNCTION raise_error_in_metadata_sync() RETURNS void LANGUAGE C STRICT AS 'citus'; -CREATE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ +CREATE OR REPLACE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ declare counter integer := -1; begin @@ -846,7 +846,22 @@ SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; db_to_drop (1 row) -DROP DATABASE db_to_drop; +DO $$ +DECLARE + i int := 0; +BEGIN + WHILE NOT (SELECT bool_and(success) from run_command_on_all_nodes('DROP DATABASE IF EXISTS db_to_drop')) + LOOP + BEGIN + i := i + 1; + IF i > 5 THEN + RAISE EXCEPTION 'DROP DATABASE timed out'; + END IF; + PERFORM pg_sleep(1); + END; + END LOOP; +END; +$$; SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; datname --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_size_queries.out b/src/test/regress/expected/multi_size_queries.out index 2ff8d9c4b..eb1981e64 100644 --- a/src/test/regress/expected/multi_size_queries.out +++ b/src/test/regress/expected/multi_size_queries.out @@ -7,19 +7,25 @@ SET citus.next_shard_id TO 1390000; -- Tests with invalid relation IDs SELECT citus_table_size(1); -ERROR: could not compute table size: relation does not exist +ERROR: could not compute relation size: relation does not exist SELECT citus_relation_size(1); -ERROR: could not compute table size: relation does not exist +ERROR: could not compute relation size: relation does not exist SELECT citus_total_relation_size(1); -ERROR: could not compute table size: relation does not exist +ERROR: could not compute relation size: relation does not exist -- Tests with non-distributed table -CREATE TABLE non_distributed_table (x int); +CREATE TABLE non_distributed_table (x int primary key); SELECT citus_table_size('non_distributed_table'); ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed SELECT citus_relation_size('non_distributed_table'); ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed SELECT citus_total_relation_size('non_distributed_table'); ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed +SELECT citus_table_size('non_distributed_table_pkey'); +ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed +SELECT citus_relation_size('non_distributed_table_pkey'); +ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed +SELECT citus_total_relation_size('non_distributed_table_pkey'); +ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed DROP TABLE non_distributed_table; -- fix broken placements via disabling the node SET client_min_messages TO ERROR; @@ -31,24 +37,70 @@ SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2, -- Tests on distributed table with replication factor > 1 VACUUM (FULL) lineitem_hash_part; -SELECT citus_table_size('lineitem_hash_part'); - citus_table_size +SELECT citus_relation_size('lineitem_hash_part') <= 
citus_table_size('lineitem_hash_part'); + ?column? --------------------------------------------------------------------- - 3801088 + t (1 row) -SELECT citus_relation_size('lineitem_hash_part'); - citus_relation_size +SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part'); + ?column? --------------------------------------------------------------------- - 3801088 + t (1 row) -SELECT citus_total_relation_size('lineitem_hash_part'); - citus_total_relation_size +SELECT citus_relation_size('lineitem_hash_part') > 0; + ?column? --------------------------------------------------------------------- - 3801088 + t (1 row) +CREATE INDEX lineitem_hash_part_idx ON lineitem_hash_part(l_orderkey); +VACUUM (FULL) lineitem_hash_part; +SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part'); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part'); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_relation_size('lineitem_hash_part') > 0; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_relation_size('lineitem_hash_part_idx') <= citus_table_size('lineitem_hash_part_idx'); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_table_size('lineitem_hash_part_idx') <= citus_total_relation_size('lineitem_hash_part_idx'); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_relation_size('lineitem_hash_part_idx') > 0; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_total_relation_size('lineitem_hash_part') >= + citus_table_size('lineitem_hash_part') + citus_table_size('lineitem_hash_part_idx'); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +DROP INDEX lineitem_hash_part_idx; VACUUM (FULL) customer_copy_hash; -- Tests on distributed tables with streaming replication. 
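The hunks above replace exact byte counts with ordering checks, since absolute sizes drift across PostgreSQL versions and depend on VACUUM timing. The invariants mirror the pg_relation_size()/pg_table_size()/pg_total_relation_size() family; for any distributed relation the following sketch holds:

SELECT citus_relation_size('customer_copy_hash')        -- main fork only
    <= citus_table_size('customer_copy_hash');          -- plus FSM, VM and TOAST
SELECT citus_table_size('customer_copy_hash')
    <= citus_total_relation_size('customer_copy_hash'); -- plus all indexes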
SELECT citus_table_size('customer_copy_hash'); @@ -72,10 +124,10 @@ SELECT citus_total_relation_size('customer_copy_hash'); -- Make sure we can get multiple sizes in a single query SELECT citus_table_size('customer_copy_hash'), citus_table_size('customer_copy_hash'), - citus_table_size('supplier'); + citus_table_size('customer_copy_hash'); citus_table_size | citus_table_size | citus_table_size --------------------------------------------------------------------- - 548864 | 548864 | 655360 + 548864 | 548864 | 548864 (1 row) CREATE INDEX index_1 on customer_copy_hash(c_custkey); @@ -99,6 +151,24 @@ SELECT citus_total_relation_size('customer_copy_hash'); 2646016 (1 row) +SELECT citus_table_size('index_1'); + citus_table_size +--------------------------------------------------------------------- + 1048576 +(1 row) + +SELECT citus_relation_size('index_1'); + citus_relation_size +--------------------------------------------------------------------- + 1048576 +(1 row) + +SELECT citus_total_relation_size('index_1'); + citus_total_relation_size +--------------------------------------------------------------------- + 1048576 +(1 row) + -- Tests on reference table VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); @@ -139,6 +209,74 @@ SELECT citus_total_relation_size('supplier'); 688128 (1 row) +SELECT citus_table_size('index_2'); + citus_table_size +--------------------------------------------------------------------- + 122880 +(1 row) + +SELECT citus_relation_size('index_2'); + citus_relation_size +--------------------------------------------------------------------- + 122880 +(1 row) + +SELECT citus_total_relation_size('index_2'); + citus_total_relation_size +--------------------------------------------------------------------- + 122880 +(1 row) + +-- Test on partitioned table +CREATE TABLE split_me (dist_col int, partition_col timestamp) PARTITION BY RANGE (partition_col); +CREATE INDEX ON split_me(dist_col); +-- create 2 partitions +CREATE TABLE m PARTITION OF split_me FOR VALUES FROM ('2018-01-01') TO ('2019-01-01'); +CREATE TABLE e PARTITION OF split_me FOR VALUES FROM ('2019-01-01') TO ('2020-01-01'); +INSERT INTO split_me SELECT 1, '2018-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 360) i; +INSERT INTO split_me SELECT 2, '2019-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 180) i; +-- before citus +SELECT citus_relation_size('split_me'); +ERROR: cannot calculate the size because relation 'split_me' is not distributed +SELECT citus_relation_size('split_me_dist_col_idx'); +ERROR: cannot calculate the size because table 'split_me' for index 'split_me_dist_col_idx' is not distributed +SELECT citus_relation_size('m'); +ERROR: cannot calculate the size because relation 'm' is not distributed +SELECT citus_relation_size('m_dist_col_idx'); +ERROR: cannot calculate the size because table 'm' for index 'm_dist_col_idx' is not distributed +-- distribute the table(s) +SELECT create_distributed_table('split_me', 'dist_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- after citus +SELECT citus_relation_size('split_me'); + citus_relation_size +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT citus_relation_size('split_me_dist_col_idx'); + citus_relation_size +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT citus_relation_size('m'); + citus_relation_size 
+--------------------------------------------------------------------- + 32768 +(1 row) + +SELECT citus_relation_size('m_dist_col_idx'); + citus_relation_size +--------------------------------------------------------------------- + 81920 +(1 row) + +DROP TABLE split_me; -- Test inside the transaction BEGIN; ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL; diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out index b82e54f16..d2b0940ed 100644 --- a/src/test/regress/expected/multi_utilities.out +++ b/src/test/regress/expected/multi_utilities.out @@ -348,6 +348,8 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM local_vacuum_table; +VACUUM local_vacuum_table; +VACUUM local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 25000000 THEN 22500000 ELSE s END FROM pg_total_relation_size('local_vacuum_table') s ; s @@ -401,6 +403,8 @@ VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 50000000 AND 70000000 THEN 60000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; size @@ -411,6 +415,8 @@ FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; size @@ -422,10 +428,14 @@ FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset SELECT :size1 > :size2 as truncate_less_size; truncate_less_size diff --git a/src/test/regress/expected/role_command_from_any_node.out b/src/test/regress/expected/role_command_from_any_node.out new file mode 100644 index 000000000..e8700a204 --- /dev/null +++ b/src/test/regress/expected/role_command_from_any_node.out @@ -0,0 +1,274 @@ +-- idempotently remove the coordinator from metadata +SELECT COUNT(citus_remove_node(nodename, nodeport)) >= 0 FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :master_port; + ?column? 
+--------------------------------------------------------------------- + t +(1 row) + +-- make sure that CREATE ROLE from workers is not supported when coordinator is not added to metadata +SELECT result FROM run_command_on_workers('CREATE ROLE test_role'); + result +--------------------------------------------------------------------- + ERROR: coordinator is not added to the metadata + ERROR: coordinator is not added to the metadata +(2 rows) + +\c - - - :master_port +CREATE SCHEMA role_command_from_any_node; +SET search_path TO role_command_from_any_node; +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +CREATE OR REPLACE FUNCTION check_role_on_all_nodes(p_role_name text) +RETURNS TABLE (node_type text, result text) +AS $func$ +DECLARE + v_worker_query text; +BEGIN + v_worker_query := format( + $$ + SELECT to_jsonb(q1.*) FROM ( + SELECT + ( + SELECT COUNT(*) = 1 FROM pg_roles WHERE rolname = '%s' + ) AS role_exists, + ( + SELECT to_jsonb(q.*) FROM (SELECT * FROM pg_roles WHERE rolname = '%s') q + ) AS role_properties, + ( + SELECT COUNT(*) = 1 + FROM pg_dist_object + WHERE objid = (SELECT oid FROM pg_roles WHERE rolname = '%s') + ) AS pg_dist_object_record_for_role_exists, + ( + SELECT COUNT(*) > 0 + FROM pg_dist_object + WHERE classid = 1260 AND objid NOT IN (SELECT oid FROM pg_roles) + ) AS stale_pg_dist_object_record_for_a_role_exists + ) q1 + $$, + p_role_name, p_role_name, p_role_name + ); + + RETURN QUERY + SELECT + CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)' + WHEN (groupid = 0) THEN 'coordinator (remote)' + WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)' + ELSE 'worker node (remote)' + END AS node_type, + q2.result + FROM run_command_on_all_nodes(v_worker_query) q2 + JOIN pg_dist_node USING (nodeid); +END; +$func$ LANGUAGE plpgsql; +\c - - - :worker_1_port +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; +SET citus.enable_create_role_propagation TO OFF; +CREATE ROLE test_role; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
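The (result::jsonb - 'role_properties') projection below strips the verbose, node-specific role_properties key so the remaining jsonb compares cleanly across nodes. A sketch of the operator (jsonb minus a text key):

SELECT '{"role_exists": true, "role_properties": {"rolcreatedb": false}}'::jsonb - 'role_properties';
-- result: {"role_exists": true}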
+SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +CREATE ROLE test_role; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +SET citus.enable_create_role_propagation TO ON; +-- doesn't fail even if the role doesn't exist on other nodes +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, 
"pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +CREATE ROLE test_role; +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +ALTER ROLE test_role_renamed RENAME TO test_role; +SET citus.enable_alter_role_propagation TO ON; +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE test_role_renamed CREATEDB; +SET citus.enable_alter_role_propagation TO ON; +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | false + worker node (local) | true + worker node (remote) | false +(3 rows) + +ALTER ROLE test_role_renamed CREATEDB; +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | true + worker node (local) | true + worker node (remote) | true +(3 rows) + +SET citus.enable_alter_role_set_propagation TO ON; +ALTER ROLE 
current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + result +--------------------------------------------------------------------- + off + off + off +(3 rows) + +SET citus.enable_alter_role_set_propagation TO OFF; +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO ON; +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + result +--------------------------------------------------------------------- + off + off + on +(3 rows) + +SET citus.enable_alter_role_set_propagation TO ON; +ALTER ROLE current_user IN DATABASE "regression" RESET enable_hashjoin; +CREATE ROLE another_user; +SET citus.enable_create_role_propagation TO OFF; +GRANT another_user TO test_role_renamed; +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + result +--------------------------------------------------------------------- + f + f + t +(3 rows) + +SET citus.enable_create_role_propagation TO ON; +SET client_min_messages TO ERROR; +GRANT another_user TO test_role_renamed; +SET client_min_messages TO NOTICE; +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +\c - - - :master_port +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; +SELECT citus_remove_node('localhost', :worker_1_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +SELECT 1 FROM citus_add_node('localhost', :worker_1_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- make sure that citus_add_node() propagates the roles created via a worker +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +SELECT citus_remove_node('localhost', :master_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +\c - - - :worker_1_port +-- they fail because the coordinator is not added to metadata +DROP ROLE test_role_renamed; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname +ALTER ROLE test_role_renamed RENAME TO test_role; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname +ALTER ROLE test_role_renamed CREATEDB; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname +GRANT another_user TO test_role_renamed; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname +\c - - - :master_port +DROP ROLE test_role_renamed, another_user; +SET client_min_messages TO WARNING; +DROP SCHEMA role_command_from_any_node CASCADE; diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index f5b76c14c..2c399f24a 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -328,8 +328,8 @@ RESET citus.shard_replication_factor; -- test some more error handling. We create them later there. SET citus.enable_create_role_propagation TO OFF; CREATE USER testrole; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. GRANT ALL ON SCHEMA public TO testrole; ERROR: role "testrole" does not exist CONTEXT: while executing command on localhost:xxxxx @@ -731,8 +731,8 @@ ERROR: target node localhost:xxxxx is not responsive \c - - - :worker_1_port SET citus.enable_create_role_propagation TO OFF; CREATE USER testrole; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT ALL ON SCHEMA public TO testrole; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. @@ -745,8 +745,8 @@ ERROR: source node localhost:xxxxx is not responsive \c - - - :worker_2_port SET citus.enable_create_role_propagation TO OFF; CREATE USER testrole; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. GRANT ALL ON SCHEMA public TO testrole; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out index 3b24fd5f5..522ffb8e8 100644 --- a/src/test/regress/expected/single_node.out +++ b/src/test/regress/expected/single_node.out @@ -90,7 +90,7 @@ SELECT create_distributed_table('failover_to_local', 'a', shard_count=>32); CREATE INDEX CONCURRENTLY ON failover_to_local(a); WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then reattempt the original command. +if applicable, and then re-attempt the original command. ERROR: the total number of connections on the server is more than max_connections(100) HINT: Consider using a higher value for max_connections -- reset global GUC changes diff --git a/src/test/regress/expected/single_node_0.out b/src/test/regress/expected/single_node_0.out index a44460cca..12b385e96 100644 --- a/src/test/regress/expected/single_node_0.out +++ b/src/test/regress/expected/single_node_0.out @@ -90,7 +90,7 @@ SELECT create_distributed_table('failover_to_local', 'a', shard_count=>32); CREATE INDEX CONCURRENTLY ON failover_to_local(a); WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then reattempt the original command. +if applicable, and then re-attempt the original command. ERROR: the total number of connections on the server is more than max_connections(100) HINT: Consider using a higher value for max_connections -- reset global GUC changes diff --git a/src/test/regress/expected/text_search.out b/src/test/regress/expected/text_search.out index b9934a1d4..6c5b387ba 100644 --- a/src/test/regress/expected/text_search.out +++ b/src/test/regress/expected/text_search.out @@ -374,12 +374,21 @@ SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; (2 rows) -- verify they are all removed locally -SELECT 'text_search.config1'::regconfig; -ERROR: text search configuration "text_search.config1" does not exist -SELECT 'text_search.config2'::regconfig; -ERROR: text search configuration "text_search.config2" does not exist -SELECT 'text_search.config3'::regconfig; -ERROR: text search configuration "text_search.config3" does not exist +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config1' AND cfgnamespace = 'text_search'::regnamespace; + ?column? 
+--------------------------------------------------------------------- +(0 rows) + +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config2' AND cfgnamespace = 'text_search'::regnamespace; + ?column? +--------------------------------------------------------------------- +(0 rows) + +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config3' AND cfgnamespace = 'text_search'::regnamespace; + ?column? +--------------------------------------------------------------------- +(0 rows) + -- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object SET citus.enable_ddl_propagation TO off; CREATE TEXT SEARCH CONFIGURATION concurrent_index_config ( PARSER = default ); @@ -434,12 +443,12 @@ $$) ORDER BY 1,2; CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = french ); -- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; $$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} - localhost | 57638 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} + localhost | 57637 | t | {manually_created_wrongly,manually_created_wrongly(citus_backup_0)} + localhost | 57638 | t | {manually_created_wrongly,manually_created_wrongly(citus_backup_0)} (2 rows) -- verify the objects get reused appropriately when the specification is the same @@ -458,7 +467,7 @@ CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = f -- now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the -- original one is reused SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; $$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- diff --git a/src/test/regress/expected/validate_constraint.out b/src/test/regress/expected/validate_constraint.out index 08b03a2bf..b38e835fd 100644 --- a/src/test/regress/expected/validate_constraint.out +++ b/src/test/regress/expected/validate_constraint.out @@ -133,12 +133,6 @@ ORDER BY 1, 2; validatable_constraint_8000016 | t (10 rows) -DROP TABLE constrained_table; -DROP TABLE referenced_table CASCADE; -DROP TABLE referencing_table; +SET client_min_messages TO WARNING; DROP SCHEMA validate_constraint CASCADE; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to type constraint_validity -drop cascades to view constraint_validations_in_workers -drop cascades to view constraint_validations SET search_path TO DEFAULT; diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 3ad0eabfc..73696bde6 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -27,6 +27,7 @@ test: multi_cluster_management test: non_super_user_object_metadata test: propagate_foreign_servers test: alter_role_propagation +test: role_command_from_any_node test: propagate_extension_commands test: 
escape_extension_name test: ref_citus_local_fkeys @@ -165,7 +166,8 @@ test: with_executors with_join with_partitioning with_transactions with_dml # Tests around DDL statements run on distributed tables # ---------- test: multi_index_statements -test: multi_alter_table_statements alter_table_add_column +test: multi_alter_table_statements +test: alter_table_add_column test: multi_alter_table_add_constraints test: multi_alter_table_add_constraints_without_name test: multi_alter_table_add_foreign_key_without_name @@ -204,7 +206,8 @@ test: citus_copy_shard_placement # multi_utilities cannot be run in parallel with other tests because it checks # global locks test: multi_utilities -test: foreign_key_to_reference_table validate_constraint +test: foreign_key_to_reference_table +test: validate_constraint test: multi_repartition_udt multi_repartitioned_subquery_udf multi_subtransactions test: multi_modifying_xacts @@ -300,7 +303,8 @@ test: replicate_reference_tables_to_coordinator test: citus_local_tables test: mixed_relkind_tests test: multi_row_router_insert create_distributed_table_concurrently -test: multi_reference_table citus_local_tables_queries +test: multi_reference_table +test: citus_local_tables_queries test: citus_local_table_triggers test: coordinator_shouldhaveshards test: local_shard_utility_command_execution diff --git a/src/test/regress/multi_schedule_hyperscale b/src/test/regress/multi_schedule_hyperscale index 8849e81f2..86ac16d4f 100644 --- a/src/test/regress/multi_schedule_hyperscale +++ b/src/test/regress/multi_schedule_hyperscale @@ -154,7 +154,8 @@ test: multi_outer_join # --- test: multi_complex_count_distinct test: multi_upsert multi_simple_queries -test: foreign_key_to_reference_table validate_constraint +test: foreign_key_to_reference_table +test: validate_constraint # --------- # creates hash and range-partitioned tables and performs COPY diff --git a/src/test/regress/multi_schedule_hyperscale_superuser b/src/test/regress/multi_schedule_hyperscale_superuser index 052b93786..f5cddfc05 100644 --- a/src/test/regress/multi_schedule_hyperscale_superuser +++ b/src/test/regress/multi_schedule_hyperscale_superuser @@ -150,7 +150,9 @@ test: multi_outer_join test: multi_create_fdw test: multi_generate_ddl_commands multi_create_shards multi_prune_shard_list test: multi_upsert multi_simple_queries multi_data_types -test: multi_utilities foreign_key_to_reference_table validate_constraint +test: multi_utilities +test: foreign_key_to_reference_table +test: validate_constraint test: multi_repartition_udt multi_repartitioned_subquery_udf # --------- diff --git a/src/test/regress/spec/isolation_get_all_active_transactions.spec b/src/test/regress/spec/isolation_get_all_active_transactions.spec index 497b3a58a..8a2d5a5c6 100644 --- a/src/test/regress/spec/isolation_get_all_active_transactions.spec +++ b/src/test/regress/spec/isolation_get_all_active_transactions.spec @@ -107,6 +107,29 @@ step "s3-show-activity" select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid); } +step "s3-wait-backend-termination" +{ + SET ROLE postgres; + + DO $$ + DECLARE + i int; + BEGIN + i := 0; + + -- try for 5 sec then timeout + WHILE (select count(*) > 0 from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid)) + LOOP + PERFORM pg_sleep(0.1); + i := i + 1; + IF i > 50 THEN + RAISE EXCEPTION 'Timeout while waiting for backend to terminate'; + END IF; + END LOOP; + END; + $$; +} + session "s4" step "s4-record-pid" @@ -123,4 +146,4 @@ step 
"s5-kill" permutation "s1-grant" "s1-begin-insert" "s2-begin-insert" "s3-as-admin" "s3-as-user-1" "s3-as-readonly" "s3-as-monitor" "s1-commit" "s2-commit" -permutation "s4-record-pid" "s3-show-activity" "s5-kill" "s3-show-activity" +permutation "s4-record-pid" "s3-show-activity" "s5-kill" "s3-wait-backend-termination" diff --git a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec index 67c20a2b2..411faf889 100644 --- a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec +++ b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec @@ -22,6 +22,7 @@ setup teardown { + SELECT wait_until_metadata_sync(); DROP FUNCTION trigger_metadata_sync(); DROP TABLE deadlock_detection_test; DROP TABLE t2; diff --git a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql index ba3f95215..480d81b88 100644 --- a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql +++ b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql @@ -79,6 +79,8 @@ SELECT pg_catalog.citus_split_shard_by_split_points( ARRAY[:worker_2_node, :worker_2_node, :worker_2_node], 'force_logical'); +SELECT public.wait_for_resource_cleanup(); + \c - - - :worker_2_port SET search_path TO "citus_split_test_schema"; -- Replication slots should be cleaned up diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index ae90088d1..157c70b28 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -18,9 +18,19 @@ create user create_drop_db_test_user; set citus.enable_create_database_propagation=on; - +-- Tests for create database propagation with template0 which should fail CREATE DATABASE mydatabase WITH OWNER = create_drop_db_test_user + TEMPLATE = 'template0' + ENCODING = 'UTF8' + CONNECTION LIMIT = 10 + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false; + +CREATE DATABASE mydatabase + WITH template=template1 + OWNER = create_drop_db_test_user ENCODING = 'UTF8' CONNECTION LIMIT = 10 TABLESPACE = create_drop_db_tablespace @@ -192,36 +202,6 @@ create database "mydatabase#1'2"; set citus.grep_remote_commands = '%DROP DATABASE%'; drop database if exists "mydatabase#1'2"; ---test for unsupported options - -CREATE DATABASE mydatabase - with CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - LC_CTYPE = 'C.UTF-8' - ALLOW_CONNECTIONS = false - IS_TEMPLATE = false; - -CREATE DATABASE mydatabase - with CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - LC_CTYPE = 'C.UTF-8' - ALLOW_CONNECTIONS = false - IS_TEMPLATE = false; - -CREATE DATABASE mydatabase - with CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - LC_COLLATE = 'C.UTF-8' - ALLOW_CONNECTIONS = false - IS_TEMPLATE = false; - -CREATE DATABASE mydatabase - with CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - LOCALE = 'C.UTF-8' - ALLOW_CONNECTIONS = false - IS_TEMPLATE = false; - --clean up resources created by this test diff --git a/src/test/regress/sql/create_role_propagation.sql b/src/test/regress/sql/create_role_propagation.sql index 027e4f72e..fa32cf2d2 100644 --- a/src/test/regress/sql/create_role_propagation.sql +++ b/src/test/regress/sql/create_role_propagation.sql @@ -25,15 +25,10 @@ SELECT master_remove_node('localhost', :worker_2_port); CREATE ROLE create_role_with_everything SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS 
CONNECTION LIMIT 105 PASSWORD 'strong_password123^' VALID UNTIL '2045-05-05 00:00:00.00+00' IN ROLE create_role, create_group ROLE create_user, create_group_2 ADMIN create_role_2, create_user_2; CREATE ROLE create_role_with_nothing NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 3 PASSWORD 'weakpassword' VALID UNTIL '2015-05-05 00:00:00.00+00'; --- show that creating role from worker node is only allowed when create role --- propagation is off +-- show that creating role from worker node is allowed \c - - - :worker_1_port CREATE ROLE role_on_worker; - -BEGIN; -SET citus.enable_create_role_propagation TO off; -CREATE ROLE role_on_worker; -ROLLBACK; +DROP ROLE role_on_worker; \c - - - :master_port @@ -277,3 +272,5 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; \c - - - :worker_1_port SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; \c - - - :master_port + +DROP ROLE nondist_cascade_1, nondist_cascade_2, nondist_cascade_3, dist_cascade; diff --git a/src/test/regress/sql/distributed_domain.sql b/src/test/regress/sql/distributed_domain.sql index 5bf3bd6a8..0850c99ee 100644 --- a/src/test/regress/sql/distributed_domain.sql +++ b/src/test/regress/sql/distributed_domain.sql @@ -349,10 +349,7 @@ SELECT * FROM use_age_invalid ORDER BY 1; ALTER DOMAIN age_invalid VALIDATE CONSTRAINT check_age_positive; -- test changing the owner of a domain -SET client_min_messages TO error; -SELECT 1 FROM run_command_on_workers($$ CREATE ROLE domain_owner; $$); CREATE ROLE domain_owner; -RESET client_min_messages; CREATE DOMAIN alter_domain_owner AS int; ALTER DOMAIN alter_domain_owner OWNER TO domain_owner; diff --git a/src/test/regress/sql/failure_distributed_results.sql b/src/test/regress/sql/failure_distributed_results.sql index 95e4d5513..93e4a9a33 100644 --- a/src/test/regress/sql/failure_distributed_results.sql +++ b/src/test/regress/sql/failure_distributed_results.sql @@ -15,6 +15,8 @@ SET client_min_messages TO WARNING; SELECT citus.mitmproxy('conn.allow()'); SET citus.next_shard_id TO 100800; +-- Needed because of issue #7306 +SET citus.force_max_query_parallelization TO true; -- always try the 1st replica before the 2nd replica. 
SET citus.task_assignment_policy TO 'first-replica'; diff --git a/src/test/regress/sql/failure_split_cleanup.sql b/src/test/regress/sql/failure_split_cleanup.sql index 1b85d3d17..9dfbb245e 100644 --- a/src/test/regress/sql/failure_split_cleanup.sql +++ b/src/test/regress/sql/failure_split_cleanup.sql @@ -136,7 +136,7 @@ SELECT create_distributed_table('table_to_split', 'id'); ARRAY[:worker_1_node, :worker_2_node], 'force_logical'); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -155,7 +155,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - - :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; @@ -182,7 +182,7 @@ SELECT create_distributed_table('table_to_split', 'id'); ARRAY[:worker_1_node, :worker_2_node], 'force_logical'); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -201,7 +201,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - - :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; @@ -228,7 +228,7 @@ SELECT create_distributed_table('table_to_split', 'id'); ARRAY[:worker_1_node, :worker_2_node], 'force_logical'); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -247,7 +247,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - - :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; @@ -275,7 +275,7 @@ SELECT create_distributed_table('table_to_split', 'id'); 'force_logical'); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND 
relkind = 'r' order by relname; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -295,7 +295,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - - :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; diff --git a/src/test/regress/sql/global_cancel.sql b/src/test/regress/sql/global_cancel.sql index 848c3b01a..12330baf2 100644 --- a/src/test/regress/sql/global_cancel.sql +++ b/src/test/regress/sql/global_cancel.sql @@ -5,9 +5,9 @@ RESET client_min_messages; -- Kill maintenance daemon so it gets restarted and gets a gpid containing our -- nodeid -SELECT pg_terminate_backend(pid) +SELECT COUNT(pg_terminate_backend(pid)) >= 0 FROM pg_stat_activity -WHERE application_name = 'Citus Maintenance Daemon' \gset +WHERE application_name = 'Citus Maintenance Daemon'; -- reconnect to make sure we get a session with the gpid containing our nodeid \c - - - - @@ -58,6 +58,8 @@ SELECT pg_cancel_backend(citus_backend_gpid()); \c - postgres - :master_port +DROP USER global_cancel_user; + SET client_min_messages TO DEBUG; -- 10000000000 is the node id multiplier for global pid diff --git a/src/test/regress/sql/insert_select_connection_leak.sql b/src/test/regress/sql/insert_select_connection_leak.sql index 05afb10a0..e138f6c4d 100644 --- a/src/test/regress/sql/insert_select_connection_leak.sql +++ b/src/test/regress/sql/insert_select_connection_leak.sql @@ -33,12 +33,12 @@ INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; -- ROLLBACK BEGIN; @@ -46,8 +46,8 @@ INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; ROLLBACK; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; \set VERBOSITY TERSE @@ 
-59,12 +59,12 @@ SELECT worker_connection_count(:worker_1_port) AS worker_1_connections, SAVEPOINT s1; INSERT INTO target_table SELECT a, CASE WHEN a < 50 THEN b ELSE null END FROM source_table; ROLLBACK TO SAVEPOINT s1; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; SET client_min_messages TO WARNING; DROP SCHEMA insert_select_connection_leak CASCADE; diff --git a/src/test/regress/sql/logical_replication.sql b/src/test/regress/sql/logical_replication.sql index 3f8e048ca..a85c70b08 100644 --- a/src/test/regress/sql/logical_replication.sql +++ b/src/test/regress/sql/logical_replication.sql @@ -35,17 +35,17 @@ CREATE SUBSCRIPTION citus_shard_move_subscription_:postgres_oid WITH (enabled=false, slot_name=citus_shard_move_slot_:postgres_oid); -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) FROM dist; \c - - - :worker_1_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) FROM dist; \c - - - :master_port @@ -53,11 +53,13 @@ SET search_path TO logical_replication; select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); +SELECT public.wait_for_resource_cleanup(); + -- the subscription is still there, as there is no cleanup record for it -- we have created it manually -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) from dist; \c - - - :worker_1_port @@ -65,9 +67,9 @@ SET search_path TO logical_replication; -- the publication and repslot are still there, as there are no cleanup records for them -- we have created them manually -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) from dist; DROP PUBLICATION citus_shard_move_publication_:postgres_oid; @@ -76,9 +78,9 @@ SELECT pg_drop_replication_slot('citus_shard_move_slot_' || :postgres_oid); \c - - - :worker_2_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; 
-SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) from dist; \c - - - :master_port diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index a4044bab3..642b2f708 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -24,8 +24,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; --- in a distributed transaction and the application name is Citus --- but we are on the coordinator, so still not allowed +-- in a distributed transaction and the application name is Citus, so it is allowed. BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql index 9ec0eb28e..86fbd15b6 100644 --- a/src/test/regress/sql/multi_cluster_management.sql +++ b/src/test/regress/sql/multi_cluster_management.sql @@ -39,7 +39,7 @@ SELECT master_get_active_worker_nodes(); SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT citus_disable_node('localhost', :worker_2_port); -SELECT public.wait_until_metadata_sync(60000); +SELECT public.wait_until_metadata_sync(20000); SELECT master_get_active_worker_nodes(); -- add some shard placements to the cluster @@ -328,7 +328,7 @@ SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_g SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); SELECT master_activate_node('localhost', 9999); SELECT citus_disable_node('localhost', 9999); -SELECT public.wait_until_metadata_sync(60000); +SELECT public.wait_until_metadata_sync(20000); SELECT master_remove_node('localhost', 9999); -- check that you can't manually add two primaries to a group @@ -530,3 +530,10 @@ RESET citus.metadata_sync_mode; -- verify that at the end of this file, all primary nodes have metadata synced SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; + +-- Grant all on public schema to public +-- +-- That's the default on Postgres versions < 15 and we want to +-- keep permissions compatible across versions in regression +-- tests.
+GRANT ALL ON SCHEMA public TO PUBLIC; diff --git a/src/test/regress/sql/multi_mx_add_coordinator.sql b/src/test/regress/sql/multi_mx_add_coordinator.sql index 47053cd28..a7ab2749a 100644 --- a/src/test/regress/sql/multi_mx_add_coordinator.sql +++ b/src/test/regress/sql/multi_mx_add_coordinator.sql @@ -41,23 +41,33 @@ CREATE TABLE ref(groupid int); SELECT create_reference_table('ref'); \c - - - :worker_1_port --- alter role from mx worker isn't allowed when alter role propagation is on -SET citus.enable_alter_role_propagation TO ON; -ALTER ROLE reprefuser WITH CREATEROLE; --- to alter role locally disable alter role propagation first +-- to alter role locally, disable alter role propagation first SET citus.enable_alter_role_propagation TO OFF; ALTER ROLE reprefuser WITH CREATEROLE; -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; -RESET citus.enable_alter_role_propagation; -\c - - - :worker_2_port --- show that altering role locally on worker doesn't propagated to other worker -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; + +-- alter role from mx worker is allowed +SET citus.enable_alter_role_propagation TO ON; +ALTER ROLE reprefuser WITH CREATEROLE; + +-- show that altering role locally on worker is propagated to coordinator and to other workers too +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; \c - - - :master_port SET search_path TO mx_add_coordinator,public; --- show that altering role locally on worker doesn't propagated to coordinator -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; SET citus.log_local_commands TO ON; SET client_min_messages TO DEBUG; @@ -67,7 +77,7 @@ SET client_min_messages TO DEBUG; SELECT count(*) FROM ref; SELECT count(*) FROM ref; --- test that distributed functions also use local execution +-- test that distributed functions also use sequential execution CREATE OR REPLACE FUNCTION my_group_id() RETURNS void LANGUAGE plpgsql @@ -190,5 +200,6 @@ SELECT verify_metadata('localhost', :worker_1_port), SET client_min_messages TO error; DROP SCHEMA mx_add_coordinator CASCADE; +DROP USER reprefuser; SET search_path TO DEFAULT; RESET client_min_messages; diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql index de3468415..4fb6eadbb 100644 --- a/src/test/regress/sql/multi_mx_create_table.sql +++ b/src/test/regress/sql/multi_mx_create_table.sql @@ -5,9 +5,15 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000; +SET client_min_messages TO WARNING; + SELECT start_metadata_sync_to_node('localhost', :worker_1_port); SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +-- cannot drop them at the end of the test file as other tests depend on them +DROP SCHEMA IF EXISTS citus_mx_test_schema, citus_mx_test_schema_join_1, citus_mx_test_schema_join_2 CASCADE; +DROP TABLE IF EXISTS nation_hash, lineitem_mx, orders_mx, customer_mx, nation_mx, part_mx, supplier_mx, mx_ddl_table, limit_orders_mx, multiple_hash_mx, app_analytics_events_mx, researchers_mx, labs_mx, objects_mx, articles_hash_mx, 
articles_single_shard_hash_mx, company_employees_mx; + -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; @@ -38,7 +44,7 @@ END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) +CREATE OR REPLACE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; CREATE OPERATOR citus_mx_test_schema.=== ( @@ -67,14 +73,16 @@ SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale); CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); -CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); +CREATE TYPE citus_mx_test_schema.order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 1 \c - - - :worker_1_port +SET client_min_messages TO WARNING; -- show that we do not support creating citus local tables from mx workers for now CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); +DROP TABLE citus_local_table; SET search_path TO citus_mx_test_schema; -- create operator @@ -89,6 +97,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( -- now create required stuff in the worker 2 \c - - - :worker_2_port +SET client_min_messages TO WARNING; SET search_path TO citus_mx_test_schema; @@ -104,6 +113,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( -- connect back to the master, and do some more tests \c - - - :master_port +SET client_min_messages TO WARNING; SET citus.shard_replication_factor TO 1; SET search_path TO public; @@ -308,7 +318,7 @@ CREATE TABLE limit_orders_mx ( symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, - kind order_side_mx NOT NULL, + kind citus_mx_test_schema.order_side_mx NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) ); @@ -386,6 +396,7 @@ FROM citus_tables ORDER BY table_name::text; \c - - - :worker_1_port +SET client_min_messages TO WARNING; SELECT table_name, citus_table_type, distribution_column, shard_count, table_owner FROM citus_tables @@ -394,4 +405,4 @@ ORDER BY table_name::text; SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards ORDER BY shard_name::text; -- Show that altering type name is not supported from worker node -ALTER TYPE order_side_mx RENAME TO temp_order_side_mx; +ALTER TYPE citus_mx_test_schema.order_side_mx RENAME TO temp_order_side_mx; diff --git a/src/test/regress/sql/multi_mx_insert_select_repartition.sql b/src/test/regress/sql/multi_mx_insert_select_repartition.sql index 4a9c8c96f..b206c6e4e 100644 --- a/src/test/regress/sql/multi_mx_insert_select_repartition.sql +++ b/src/test/regress/sql/multi_mx_insert_select_repartition.sql @@ -55,6 +55,8 @@ SET citus.log_local_commands to on; -- INSERT .. SELECT via repartitioning with local execution BEGIN; select count(*) from source_table WHERE a = 1; + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; ROLLBACK; diff --git a/src/test/regress/sql/multi_mx_node_metadata.sql b/src/test/regress/sql/multi_mx_node_metadata.sql index 45b4edae1..e0d765a20 100644 --- a/src/test/regress/sql/multi_mx_node_metadata.sql +++ b/src/test/regress/sql/multi_mx_node_metadata.sql @@ -14,7 +14,7 @@ SET citus.shard_replication_factor TO 1; \set VERBOSITY terse -- Simulates a readonly node by setting default_transaction_read_only. -CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) +CREATE OR REPLACE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) RETURNS TEXT LANGUAGE sql AS $$ @@ -35,7 +35,7 @@ CREATE OR REPLACE FUNCTION raise_error_in_metadata_sync() LANGUAGE C STRICT AS 'citus'; -CREATE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ +CREATE OR REPLACE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ declare counter integer := -1; begin @@ -378,7 +378,22 @@ SELECT trigger_metadata_sync(); SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; -DROP DATABASE db_to_drop; +DO $$ +DECLARE + i int := 0; +BEGIN + WHILE NOT (SELECT bool_and(success) from run_command_on_all_nodes('DROP DATABASE IF EXISTS db_to_drop')) + LOOP + BEGIN + i := i + 1; + IF i > 5 THEN + RAISE EXCEPTION 'DROP DATABASE timed out'; + END IF; + PERFORM pg_sleep(1); + END; + END LOOP; +END; +$$; SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; diff --git a/src/test/regress/sql/multi_size_queries.sql b/src/test/regress/sql/multi_size_queries.sql index ff8d203f1..fdc3f7892 100644 --- a/src/test/regress/sql/multi_size_queries.sql +++ b/src/test/regress/sql/multi_size_queries.sql @@ -13,10 +13,15 @@ SELECT citus_relation_size(1); SELECT citus_total_relation_size(1); -- Tests with non-distributed table -CREATE TABLE non_distributed_table (x int); +CREATE TABLE non_distributed_table (x int primary key); + SELECT citus_table_size('non_distributed_table'); SELECT citus_relation_size('non_distributed_table'); SELECT citus_total_relation_size('non_distributed_table'); + +SELECT citus_table_size('non_distributed_table_pkey'); +SELECT citus_relation_size('non_distributed_table_pkey'); +SELECT citus_total_relation_size('non_distributed_table_pkey'); DROP TABLE non_distributed_table; -- fix broken placements via disabling the node @@ -26,9 +31,25 @@ SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2, -- Tests on distributed table with replication factor > 1 VACUUM (FULL) lineitem_hash_part; -SELECT citus_table_size('lineitem_hash_part'); -SELECT citus_relation_size('lineitem_hash_part'); -SELECT citus_total_relation_size('lineitem_hash_part'); +SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part'); +SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part'); +SELECT citus_relation_size('lineitem_hash_part') > 0; + +CREATE INDEX lineitem_hash_part_idx ON lineitem_hash_part(l_orderkey); +VACUUM (FULL) lineitem_hash_part; + +SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part'); +SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part'); +SELECT citus_relation_size('lineitem_hash_part') > 0; + +SELECT citus_relation_size('lineitem_hash_part_idx') <= 
citus_table_size('lineitem_hash_part_idx'); +SELECT citus_table_size('lineitem_hash_part_idx') <= citus_total_relation_size('lineitem_hash_part_idx'); +SELECT citus_relation_size('lineitem_hash_part_idx') > 0; + +SELECT citus_total_relation_size('lineitem_hash_part') >= + citus_table_size('lineitem_hash_part') + citus_table_size('lineitem_hash_part_idx'); + +DROP INDEX lineitem_hash_part_idx; VACUUM (FULL) customer_copy_hash; @@ -40,7 +61,7 @@ SELECT citus_total_relation_size('customer_copy_hash'); -- Make sure we can get multiple sizes in a single query SELECT citus_table_size('customer_copy_hash'), citus_table_size('customer_copy_hash'), - citus_table_size('supplier'); + citus_table_size('customer_copy_hash'); CREATE INDEX index_1 on customer_copy_hash(c_custkey); VACUUM (FULL) customer_copy_hash; @@ -50,6 +71,10 @@ SELECT citus_table_size('customer_copy_hash'); SELECT citus_relation_size('customer_copy_hash'); SELECT citus_total_relation_size('customer_copy_hash'); +SELECT citus_table_size('index_1'); +SELECT citus_relation_size('index_1'); +SELECT citus_total_relation_size('index_1'); + -- Tests on reference table VACUUM (FULL) supplier; @@ -64,6 +89,38 @@ SELECT citus_table_size('supplier'); SELECT citus_relation_size('supplier'); SELECT citus_total_relation_size('supplier'); +SELECT citus_table_size('index_2'); +SELECT citus_relation_size('index_2'); +SELECT citus_total_relation_size('index_2'); + +-- Test on partitioned table +CREATE TABLE split_me (dist_col int, partition_col timestamp) PARTITION BY RANGE (partition_col); +CREATE INDEX ON split_me(dist_col); + +-- create 2 partitions +CREATE TABLE m PARTITION OF split_me FOR VALUES FROM ('2018-01-01') TO ('2019-01-01'); +CREATE TABLE e PARTITION OF split_me FOR VALUES FROM ('2019-01-01') TO ('2020-01-01'); + +INSERT INTO split_me SELECT 1, '2018-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 360) i; +INSERT INTO split_me SELECT 2, '2019-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 180) i; + +-- before citus +SELECT citus_relation_size('split_me'); +SELECT citus_relation_size('split_me_dist_col_idx'); +SELECT citus_relation_size('m'); +SELECT citus_relation_size('m_dist_col_idx'); + +-- distribute the table(s) +SELECT create_distributed_table('split_me', 'dist_col'); + +-- after citus +SELECT citus_relation_size('split_me'); +SELECT citus_relation_size('split_me_dist_col_idx'); +SELECT citus_relation_size('m'); +SELECT citus_relation_size('m_dist_col_idx'); + +DROP TABLE split_me; + -- Test inside the transaction BEGIN; ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL; diff --git a/src/test/regress/sql/multi_utilities.sql b/src/test/regress/sql/multi_utilities.sql index 9a14ab590..1124b9890 100644 --- a/src/test/regress/sql/multi_utilities.sql +++ b/src/test/regress/sql/multi_utilities.sql @@ -229,6 +229,8 @@ VACUUM; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM local_vacuum_table; +VACUUM local_vacuum_table; +VACUUM local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 25000000 THEN 22500000 ELSE s END FROM pg_total_relation_size('local_vacuum_table') s ; @@ -257,12 +259,16 @@ VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; 
SELECT CASE WHEN s BETWEEN 50000000 AND 70000000 THEN 60000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; @@ -270,11 +276,15 @@ FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset SELECT :size1 > :size2 as truncate_less_size; diff --git a/src/test/regress/sql/role_command_from_any_node.sql b/src/test/regress/sql/role_command_from_any_node.sql new file mode 100644 index 000000000..0fd574716 --- /dev/null +++ b/src/test/regress/sql/role_command_from_any_node.sql @@ -0,0 +1,174 @@ +-- idempotently remove the coordinator from metadata +SELECT COUNT(citus_remove_node(nodename, nodeport)) >= 0 FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :master_port; + +-- make sure that CREATE ROLE from workers is not supported when coordinator is not added to metadata +SELECT result FROM run_command_on_workers('CREATE ROLE test_role'); + +\c - - - :master_port + +CREATE SCHEMA role_command_from_any_node; +SET search_path TO role_command_from_any_node; + +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); + +CREATE OR REPLACE FUNCTION check_role_on_all_nodes(p_role_name text) +RETURNS TABLE (node_type text, result text) +AS $func$ +DECLARE + v_worker_query text; +BEGIN + v_worker_query := format( + $$ + SELECT to_jsonb(q1.*) FROM ( + SELECT + ( + SELECT COUNT(*) = 1 FROM pg_roles WHERE rolname = '%s' + ) AS role_exists, + ( + SELECT to_jsonb(q.*) FROM (SELECT * FROM pg_roles WHERE rolname = '%s') q + ) AS role_properties, + ( + SELECT COUNT(*) = 1 + FROM pg_dist_object + WHERE objid = (SELECT oid FROM pg_roles WHERE rolname = '%s') + ) AS pg_dist_object_record_for_role_exists, + ( + SELECT COUNT(*) > 0 + FROM pg_dist_object + WHERE classid = 1260 AND objid NOT IN (SELECT oid FROM pg_roles) + ) AS stale_pg_dist_object_record_for_a_role_exists + ) q1 + $$, + p_role_name, p_role_name, p_role_name + ); + + RETURN QUERY + SELECT + CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)' + WHEN (groupid = 0) THEN 'coordinator (remote)' + WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)' + ELSE 'worker node (remote)' + END AS node_type, + q2.result + FROM run_command_on_all_nodes(v_worker_query) q2 + JOIN pg_dist_node USING (nodeid); +END; +$func$ LANGUAGE plpgsql; + +\c - - - :worker_1_port + +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; + +SET citus.enable_create_role_propagation TO 
OFF; + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +SET citus.enable_create_role_propagation TO ON; + +-- doesn't fail even if the role doesn't exist on other nodes +DROP ROLE test_role; + +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +CREATE ROLE test_role; + +SET citus.enable_alter_role_propagation TO OFF; + +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +ALTER ROLE test_role_renamed RENAME TO test_role; + +SET citus.enable_alter_role_propagation TO ON; + +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE test_role_renamed CREATEDB; +SET citus.enable_alter_role_propagation TO ON; + +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +ALTER ROLE test_role_renamed CREATEDB; +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +SET citus.enable_alter_role_set_propagation TO ON; + +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; + +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + +SET citus.enable_alter_role_set_propagation TO OFF; + +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO ON; + +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + +SET citus.enable_alter_role_set_propagation TO ON; + +ALTER ROLE current_user IN DATABASE "regression" RESET enable_hashjoin; + +CREATE ROLE another_user; + +SET citus.enable_create_role_propagation TO OFF; + +GRANT another_user TO test_role_renamed; + +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + +SET citus.enable_create_role_propagation TO ON; + +SET client_min_messages TO ERROR; +GRANT another_user TO test_role_renamed; +SET client_min_messages TO NOTICE; + +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + +\c - - - :master_port + +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; + +SELECT citus_remove_node('localhost', :worker_1_port); +SELECT 1 FROM citus_add_node('localhost', :worker_1_port); + 
+-- make sure that citus_add_node() propagates the roles created via a worker +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +SELECT citus_remove_node('localhost', :master_port); + +\c - - - :worker_1_port + +-- they fail because the coordinator is not added to metadata +DROP ROLE test_role_renamed; +ALTER ROLE test_role_renamed RENAME TO test_role; +ALTER ROLE test_role_renamed CREATEDB; +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; +GRANT another_user TO test_role_renamed; + +\c - - - :master_port + +DROP ROLE test_role_renamed, another_user; + +SET client_min_messages TO WARNING; +DROP SCHEMA role_command_from_any_node CASCADE; diff --git a/src/test/regress/sql/text_search.sql b/src/test/regress/sql/text_search.sql index d0d4b5a6f..4a65a5e1a 100644 --- a/src/test/regress/sql/text_search.sql +++ b/src/test/regress/sql/text_search.sql @@ -199,9 +199,9 @@ SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; -- verify they are all removed locally -SELECT 'text_search.config1'::regconfig; -SELECT 'text_search.config2'::regconfig; -SELECT 'text_search.config3'::regconfig; +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config1' AND cfgnamespace = 'text_search'::regnamespace; +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config2' AND cfgnamespace = 'text_search'::regnamespace; +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config3' AND cfgnamespace = 'text_search'::regnamespace; -- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object SET citus.enable_ddl_propagation TO off; @@ -235,7 +235,7 @@ CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = f -- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; $$) ORDER BY 1,2; -- verify the objects get reused appropriately when the specification is the same @@ -249,7 +249,7 @@ CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = f -- now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the -- original one is reused SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; $$) ORDER BY 1,2; CREATE SCHEMA "Text Search Requiring Quote's"; diff --git a/src/test/regress/sql/validate_constraint.sql b/src/test/regress/sql/validate_constraint.sql index 294e9a8b2..bb63f2854 100644 --- a/src/test/regress/sql/validate_constraint.sql +++ b/src/test/regress/sql/validate_constraint.sql @@ -116,9 +116,6 @@ SELECT * FROM constraint_validations_in_workers ORDER BY 1, 2; -DROP TABLE constrained_table; -DROP TABLE referenced_table CASCADE; -DROP TABLE referencing_table; - +SET client_min_messages TO WARNING; DROP SCHEMA validate_constraint CASCADE; SET search_path TO DEFAULT;
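The tests above rely on one cross-node verification idiom throughout: instead of reconnecting to every node with \c, they wrap a catalog query in run_command_on_all_nodes() and compare the per-node results. Below is a minimal sketch of that idiom, assuming only a Citus cluster where run_command_on_all_nodes() is available; the role name sketch_role is hypothetical and used purely for illustration.

-- minimal sketch (hypothetical role name): check on every node in the
-- cluster whether the role exists, getting one jsonb result per node
CREATE ROLE sketch_role;
SELECT result::jsonb
FROM run_command_on_all_nodes($$
    SELECT to_jsonb(q.*) FROM (
        SELECT COUNT(*) = 1 AS role_exists
        FROM pg_roles WHERE rolname = 'sketch_role'
    ) q
$$)
ORDER BY result;
DROP ROLE sketch_role;

With citus.enable_create_role_propagation left at its default (on) and the coordinator present in the metadata, every node should report {"role_exists": true} before the DROP, matching the all-true rows in the expected output above.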