Merge branch 'main' into fix-unchecked-res--FindWorkerNode

pull/7704/head
Onur Tirtir 2025-03-05 15:45:29 +03:00 committed by GitHub
commit 64a4bc4af9
60 changed files with 622 additions and 1471 deletions

View File

@@ -68,7 +68,7 @@ USER citus
 # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions
 
 FROM base AS pg14
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.12
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.15
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 
 FROM base AS pg15
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.7
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.10
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 
 FROM base AS pg16
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.3
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.6
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -211,7 +211,7 @@ COPY --chown=citus:citus .psqlrc .
 RUN sudo chown --from=root:root citus:citus -R ~
 
 # sets default pg version
-RUN pgenv switch 16.3
+RUN pgenv switch 16.6
 
 # make connecting to the coordinator easy
 ENV PGPORT=9700

View File

@@ -6,7 +6,7 @@ inputs:
 runs:
   using: composite
   steps:
-  - uses: actions/upload-artifact@v3.1.1
+  - uses: actions/upload-artifact@v4.6.0
     name: Upload logs
     with:
      name: ${{ inputs.folder }}

View File

@@ -17,7 +17,7 @@ runs:
        echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
      fi
    shell: bash
-  - uses: actions/download-artifact@v3.0.1
+  - uses: actions/download-artifact@v4.1.8
    with:
      name: build-${{ env.PG_MAJOR }}
  - name: Install Extension

View File

@@ -21,7 +21,7 @@ runs:
        mkdir -p /tmp/codeclimate
        cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
    shell: bash
-  - uses: actions/upload-artifact@v3.1.1
+  - uses: actions/upload-artifact@v4.6.0
    with:
      path: "/tmp/codeclimate/*.json"
-     name: codeclimate
+     name: codeclimate-${{ inputs.flags }}

View File

@@ -31,14 +31,14 @@ jobs:
       pgupgrade_image_name: "ghcr.io/citusdata/pgupgradetester"
       style_checker_image_name: "ghcr.io/citusdata/stylechecker"
       style_checker_tools_version: "0.8.18"
-      sql_snapshot_pg_version: "16.3"
-      image_suffix: "-v13fd57c"
-      pg14_version: '{ "major": "14", "full": "14.12" }'
-      pg15_version: '{ "major": "15", "full": "15.7" }'
-      pg16_version: '{ "major": "16", "full": "16.3" }'
-      upgrade_pg_versions: "14.12-15.7-16.3"
+      sql_snapshot_pg_version: "16.6"
+      image_suffix: "-v5779674"
+      pg14_version: '{ "major": "14", "full": "14.15" }'
+      pg15_version: '{ "major": "15", "full": "15.10" }'
+      pg16_version: '{ "major": "16", "full": "16.6" }'
+      upgrade_pg_versions: "14.15-15.10-16.6"
     steps:
-      # Since GHA jobs needs at least one step we use a noop step here.
+      # Since GHA jobs need at least one step we use a noop step here.
       - name: Set up parameters
         run: echo 'noop'
   check-sql-snapshots:
@@ -48,7 +48,7 @@ jobs:
       image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }}
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - name: Check Snapshots
         run: |
           git config --global --add safe.directory ${GITHUB_WORKSPACE}
@@ -125,7 +125,7 @@ jobs:
       - name: Build
         run: "./ci/build-citus.sh"
         shell: bash
-      - uses: actions/upload-artifact@v3.1.1
+      - uses: actions/upload-artifact@v4.6.0
        with:
          name: build-${{ env.PG_MAJOR }}
          path: |-
@@ -284,10 +284,12 @@ jobs:
           check-arbitrary-configs parallel=4 CONFIGS=$TESTS
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
           codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-pg-upgrade:
     name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
@@ -335,6 +337,8 @@ jobs:
         if: failure()
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
@@ -380,10 +384,12 @@ jobs:
           done;
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: ${{ env.PG_MAJOR }}_citus_upgrade
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_citus_upgrade
           codecov_token: ${{ secrets.CODECOV_TOKEN }}
   upload-coverage:
     if: always()
@@ -399,10 +405,11 @@ jobs:
       - test-citus-upgrade
       - test-pg-upgrade
     steps:
-      - uses: actions/download-artifact@v3.0.1
+      - uses: actions/download-artifact@v4.1.8
        with:
-         name: "codeclimate"
-         path: "codeclimate"
+         pattern: codeclimate*
+         path: codeclimate
+         merge-multiple: true
      - name: Upload coverage results to Code Climate
        run: |-
          cc-test-reporter sum-coverage codeclimate/*.json -o total.json
@@ -444,7 +451,7 @@ jobs:
           chmod +x run_hammerdb.sh
           run_hammerdb.sh citusbot_tpcc_benchmark_rg
   prepare_parallelization_matrix_32:
-    name: Parallel 32
+    name: Prepare parallelization matrix
     if: ${{ needs.test-flakyness-pre.outputs.tests != ''}}
     needs: test-flakyness-pre
     runs-on: ubuntu-20.04
@@ -516,6 +523,7 @@ jobs:
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
     steps:
       - uses: actions/checkout@v4
+      - uses: actions/download-artifact@v4.1.8
       - uses: "./.github/actions/setup_extension"
       - name: Run minimal tests
         run: |-
@@ -529,3 +537,5 @@ jobs:
         shell: bash
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: test_flakyness_parallel_${{ matrix.id }}

View File

@@ -34,7 +34,7 @@ jobs:
           echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
           ./ci/build-citus.sh
         shell: bash
-      - uses: actions/upload-artifact@v3.1.1
+      - uses: actions/upload-artifact@v4.6.0
        with:
          name: build-${{ env.PG_MAJOR }}
          path: |-
@@ -76,4 +76,4 @@ jobs:
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
         with:
-          folder: ${{ matrix.id }}
+          folder: check_flakyness_parallel_${{ matrix.id }}

View File

@@ -116,7 +116,6 @@ jobs:
         # for each deb based image and we use POSTGRES_VERSION to set
         # PG_CONFIG variable in each of those runs.
         packaging_docker_image:
-          - debian-buster-all
           - debian-bookworm-all
           - debian-bullseye-all
           - ubuntu-focal-all
@@ -130,7 +129,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Set pg_config path and python parameters for deb based distros
         run: |

View File

@@ -1,3 +1,51 @@
+### citus v13.0.1 (February 4th, 2025) ###
+
+* Drops support for PostgreSQL 14 (#7753)
+
+### citus v13.0.0 (January 17, 2025) ###
+
+* Adds support for PostgreSQL 17 (#7699, #7661)
+
+* Adds `JSON_TABLE()` support in distributed queries (#7816)
+
+* Propagates `MERGE ... WHEN NOT MATCHED BY SOURCE` (#7807)
+
+* Propagates `MEMORY` and `SERIALIZE` options of `EXPLAIN` (#7802)
+
+* Adds support for identity columns in distributed partitioned tables (#7785)
+
+* Allows specifying an access method for distributed partitioned tables (#7818)
+
+* Allows exclusion constraints on distributed partitioned tables (#7733)
+
+* Allows configuring sslnegotiation using `citus.node_conn_info` (#7821)
+
+* Avoids wal receiver timeouts during large shard splits (#7229)
+
+* Fixes a bug causing incorrect writing of data to target `MERGE` repartition
+  command (#7659)
+
+* Fixes a crash that happens because of unsafe catalog access when re-assigning
+  the global pid after `application_name` changes (#7791)
+
+* Fixes incorrect `VALID UNTIL` setting assumption made for roles when syncing
+  them to new nodes (#7534)
+
+* Fixes segfault when calling distributed procedure with a parameterized
+  distribution argument (#7242)
+
+* Fixes server crash when trying to execute `activate_node_snapshot()` on a
+  single-node cluster (#7552)
+
+* Improves `citus_move_shard_placement()` to fail early if there is a new node
+  without reference tables yet (#7467)
+
+### citus v12.1.6 (Nov 14, 2024) ###
+
+* Propagates `SECURITY LABEL .. ON ROLE` statements (#7304)
+
+* Fixes crash caused by running queries with window partition (#7718)
+
 ### citus v12.1.5 (July 17, 2024) ###
 
 * Adds support for MERGE commands with single shard distributed target tables

View File

@@ -1,4 +1,4 @@
-| **<br/>The Citus database is 100% open source.<br/><img width=1000/><br/>Learn what's new in the [Citus 12.1 release blog](https://www.citusdata.com/blog/2023/09/22/adding-postgres-16-support-to-citus-12-1/) and the [Citus Updates page](https://www.citusdata.com/updates/).<br/><br/>**|
+| **<br/>The Citus database is 100% open source.<br/><img width=1000/><br/>Learn what's new in the [Citus 13.0 release blog](https://www.citusdata.com/blog/2025/02/06/distribute-postgresql-17-with-citus-13/) and the [Citus Updates page](https://www.citusdata.com/updates/).<br/><br/>**|
 |---|
 <br/>
@@ -95,14 +95,14 @@ Install packages on Ubuntu / Debian:
 ```bash
 curl https://install.citusdata.com/community/deb.sh > add-citus-repo.sh
 sudo bash add-citus-repo.sh
-sudo apt-get -y install postgresql-16-citus-12.1
+sudo apt-get -y install postgresql-17-citus-13.0
 ```
 
-Install packages on CentOS / Red Hat:
+Install packages on Red Hat:
 ```bash
 curl https://install.citusdata.com/community/rpm.sh > add-citus-repo.sh
 sudo bash add-citus-repo.sh
-sudo yum install -y citus121_16
+sudo yum install -y citus130_17
 ```
 
 To add Citus to your local PostgreSQL database, add the following to `postgresql.conf`:

@@ -1 +0,0 @@
Subproject commit 3376bd6845f0614908ed304f5033bd644c82d3bf

View File

@@ -3021,6 +3021,8 @@ AvailableExtensionVersionColumnar(void)
 	ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 					errmsg("citus extension is not found")));
+
+	return NULL; /* keep compiler happy */
 }

View File

@@ -2522,6 +2522,8 @@ AvailableExtensionVersion(void)
 	ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 					errmsg("citus extension is not found")));
+
+	return NULL; /* keep compiler happy */
 }
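Both `AvailableExtensionVersion*` hunks above, and the `CastExpr` hunk a few files below, add the same PostgreSQL idiom: `ereport(ERROR, ...)` never returns control (it longjmps to the active error handler), but not every compiler can prove that, so a non-void function ending in an `ereport` can trip a missing-return warning. A minimal sketch of the idiom, using a hypothetical function that is not part of this commit:

```c
#include "postgres.h"

/*
 * Hypothetical example of the pattern applied in this commit: the
 * ereport(ERROR, ...) call below never returns control, but a compiler
 * that cannot see through the longjmp still expects a return value on
 * every path, so an explicit, unreachable return is added.
 */
static char *
LookupRequiredValue(char *value)
{
	if (value != NULL)
	{
		return value;
	}

	ereport(ERROR, (errmsg("required value is not found")));

	return NULL; /* keep compiler happy; unreachable */
}
```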

View File

@@ -4688,7 +4688,7 @@ void
 SendOrCollectCommandListToMetadataNodes(MetadataSyncContext *context, List *commands)
 {
 	/*
-	 * do not send any command to workers if we collcet commands.
+	 * do not send any command to workers if we collect commands.
 	 * Collect commands into metadataSyncContext's collected command
 	 * list.
 	 */

View File

@@ -1810,6 +1810,8 @@ CastExpr(Expr *expr, Oid sourceType, Oid targetType, Oid targetCollation,
 		ereport(ERROR, (errmsg("could not find a conversion path from type %d to %d",
 							   sourceType, targetType)));
 	}
+
+	return NULL; /* keep compiler happy */
 }

View File

@@ -190,6 +190,14 @@ PG_FUNCTION_INFO_V1(worker_save_query_explain_analyze);
 void
 CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es)
 {
+#if PG_VERSION_NUM >= PG_VERSION_16
+	if (es->generic)
+	{
+		ereport(ERROR, (errmsg(
+							"EXPLAIN GENERIC_PLAN is currently not supported for Citus tables")));
+	}
+#endif
+
 	CitusScanState *scanState = (CitusScanState *) node;
 	DistributedPlan *distributedPlan = scanState->distributedPlan;
 	EState *executorState = ScanStateGetExecutorState(scanState);
@@ -992,18 +1000,12 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es)
 	appendStringInfo(explainQuery,
 					 "EXPLAIN (ANALYZE %s, VERBOSE %s, "
 					 "COSTS %s, BUFFERS %s, WAL %s, "
-#if PG_VERSION_NUM >= PG_VERSION_16
-					 "GENERIC_PLAN %s, "
-#endif
 					 "TIMING %s, SUMMARY %s, FORMAT %s) %s",
 					 es->analyze ? "TRUE" : "FALSE",
 					 es->verbose ? "TRUE" : "FALSE",
 					 es->costs ? "TRUE" : "FALSE",
 					 es->buffers ? "TRUE" : "FALSE",
 					 es->wal ? "TRUE" : "FALSE",
-#if PG_VERSION_NUM >= PG_VERSION_16
-					 es->generic ? "TRUE" : "FALSE",
-#endif
 					 es->timing ? "TRUE" : "FALSE",
 					 es->summary ? "TRUE" : "FALSE",
 					 formatStr,

View File

@@ -1557,9 +1557,10 @@ MasterAggregateMutator(Node *originalNode, MasterAggregateWalkerContext *walkerC
 	}
 	else if (IsA(originalNode, Var))
 	{
-		Var *newColumn = copyObject((Var *) originalNode);
-		newColumn->varno = masterTableId;
-		newColumn->varattno = walkerContext->columnId;
+		Var *origColumn = (Var *) originalNode;
+		Var *newColumn = makeVar(masterTableId, walkerContext->columnId,
+								 origColumn->vartype, origColumn->vartypmod,
+								 origColumn->varcollid, origColumn->varlevelsup);
 		walkerContext->columnId++;
 		newNode = (Node *) newColumn;
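For context, this rewrite matters on PostgreSQL 16 and later because `copyObject()` on a `Var` also copies its `varnullingrels` field, which can reference join relations that no longer exist in the rewritten coordinator query (see the issue_7705 regression test added later in this diff); building the `Var` from scratch leaves that field empty. A minimal sketch of the pattern, assuming PostgreSQL's `makeVar()` from `nodes/makefuncs.h`; the helper name is hypothetical:

```c
#include "postgres.h"
#include "nodes/makefuncs.h"

/*
 * Sketch of the fix above: instead of duplicating an existing Var
 * (which on PG 16+ would drag along its varnullingrels set), construct
 * a fresh Var that carries over only the type information.
 */
static Var *
MakeMasterColumn(Var *origColumn, Index masterTableId, AttrNumber columnId)
{
	return makeVar(masterTableId, columnId,
				   origColumn->vartype, origColumn->vartypmod,
				   origColumn->varcollid, origColumn->varlevelsup);
}
```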

View File

@@ -1834,16 +1834,6 @@ RegisterCitusConfigVariables(void)
 		GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE | GUC_UNIT_MS,
 		NULL, NULL, NULL);
 
-	DefineCustomStringVariable(
-		"citus.main_db",
-		gettext_noop("Which database is designated as the main_db"),
-		NULL,
-		&MainDb,
-		"",
-		PGC_POSTMASTER,
-		GUC_STANDARD,
-		NULL, NULL, NULL);
-
 	DefineCustomIntVariable(
 		"citus.max_adaptive_executor_pool_size",
 		gettext_noop("Sets the maximum number of connections per worker node used by "
@@ -2890,14 +2880,27 @@ ApplicationNameAssignHook(const char *newval, void *extra)
 	DetermineCitusBackendType(newval);
 
 	/*
-	 * AssignGlobalPID might read from catalog tables to get the the local
-	 * nodeid. But ApplicationNameAssignHook might be called before catalog
-	 * access is available to the backend (such as in early stages of
-	 * authentication). We use StartupCitusBackend to initialize the global pid
-	 * after catalogs are available. After that happens this hook becomes
-	 * responsible to update the global pid on later application_name changes.
-	 * So we set the FinishedStartupCitusBackend flag in StartupCitusBackend to
-	 * indicate when this responsibility handoff has happened.
+	 * We use StartupCitusBackend to initialize the global pid after catalogs
+	 * are available. After that happens this hook becomes responsible to update
+	 * the global pid on later application_name changes. So we set the
+	 * FinishedStartupCitusBackend flag in StartupCitusBackend to indicate when
+	 * this responsibility handoff has happened.
+	 *
+	 * Also note that when application_name changes, we don't actually need to
+	 * try re-assigning the global pid for external client backends and
+	 * background workers because application_name doesn't affect the global
+	 * pid for such backends - note that !IsExternalClientBackend() check covers
+	 * both types of backends. Plus,
+	 * trying to re-assign the global pid for such backends would unnecessarily
+	 * cause performing a catalog access when the cached local node id is
+	 * invalidated. However, accessing to the catalog tables is dangerous in
+	 * certain situations like when we're not in a transaction block. And for
+	 * the other types of backends, i.e., the Citus internal backends, we need
+	 * to re-assign the global pid when the application_name changes because for
+	 * such backends we simply extract the global pid inherited from the
+	 * originating backend from the application_name -that's specified by
+	 * originating backend when openning that connection- and this doesn't require
+	 * catalog access.
 	 *
 	 * Another solution to the catalog table acccess problem would be to update
 	 * global pid lazily, like we do for HideShards. But that's not possible
@@ -2907,7 +2910,7 @@ ApplicationNameAssignHook(const char *newval, void *extra)
 	 * as reasonably possible, which is also why we extract global pids in the
 	 * AuthHook already (extracting doesn't require catalog access).
 	 */
-	if (FinishedStartupCitusBackend)
+	if (FinishedStartupCitusBackend && !IsExternalClientBackend())
 	{
 		AssignGlobalPID(newval);
 	}
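As the rewritten comment above describes, Citus internal backends carry the originating backend's global pid inside `application_name`, so re-assigning it is pure string parsing and never touches the catalog. A rough standalone sketch of that extraction idea; the prefix format here is an assumption for illustration, not the verbatim layout Citus uses:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed marker for illustration only; not the exact Citus format. */
#define ASSUMED_GPID_PREFIX "citus_internal gpid="

/*
 * Parses a global pid that the originating backend embedded into
 * application_name when it opened the connection. No catalog lookup is
 * involved, which is why doing this is safe even outside of a
 * transaction block. Returns 0 when the name carries no gpid.
 */
uint64_t
ExtractGpidFromApplicationName(const char *applicationName)
{
	uint64_t gpid = 0;
	size_t prefixLen = strlen(ASSUMED_GPID_PREFIX);

	if (strncmp(applicationName, ASSUMED_GPID_PREFIX, prefixLen) == 0)
	{
		sscanf(applicationName + prefixLen, "%" SCNu64, &gpid);
	}

	return gpid;
}
```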

View File

@@ -4,29 +4,21 @@
 
 #include "udfs/citus_internal_database_command/12.2-1.sql"
 #include "udfs/citus_add_rebalance_strategy/12.2-1.sql"
-#include "udfs/start_management_transaction/12.2-1.sql"
-#include "udfs/execute_command_on_remote_nodes_as_user/12.2-1.sql"
-#include "udfs/mark_object_distributed/12.2-1.sql"
 
 DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid, oid, int);
 #include "udfs/citus_unmark_object_distributed/12.2-1.sql"
-#include "udfs/commit_management_command_2pc/12.2-1.sql"
 
 ALTER TABLE pg_catalog.pg_dist_transaction ADD COLUMN outer_xid xid8;
 
 #include "udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql"
 
 GRANT USAGE ON SCHEMA citus_internal TO PUBLIC;
-REVOKE ALL ON FUNCTION citus_internal.commit_management_command_2pc FROM PUBLIC;
-REVOKE ALL ON FUNCTION citus_internal.execute_command_on_remote_nodes_as_user FROM PUBLIC;
 REVOKE ALL ON FUNCTION citus_internal.find_groupid_for_node FROM PUBLIC;
-REVOKE ALL ON FUNCTION citus_internal.mark_object_distributed FROM PUBLIC;
 REVOKE ALL ON FUNCTION citus_internal.pg_dist_node_trigger_func FROM PUBLIC;
 REVOKE ALL ON FUNCTION citus_internal.pg_dist_rebalance_strategy_trigger_func FROM PUBLIC;
 REVOKE ALL ON FUNCTION citus_internal.pg_dist_shard_placement_trigger_func FROM PUBLIC;
 REVOKE ALL ON FUNCTION citus_internal.refresh_isolation_tester_prepared_statement FROM PUBLIC;
 REVOKE ALL ON FUNCTION citus_internal.replace_isolation_tester_func FROM PUBLIC;
 REVOKE ALL ON FUNCTION citus_internal.restore_isolation_tester_func FROM PUBLIC;
-REVOKE ALL ON FUNCTION citus_internal.start_management_transaction FROM PUBLIC;
 
 #include "udfs/citus_internal_add_colocation_metadata/12.2-1.sql"
 #include "udfs/citus_internal_add_object_metadata/12.2-1.sql"

View File

@@ -5,24 +5,9 @@ DROP FUNCTION citus_internal.acquire_citus_advisory_object_class_lock(int, cstri
 
 #include "../udfs/citus_add_rebalance_strategy/10.1-1.sql"
 
-DROP FUNCTION citus_internal.start_management_transaction(
-    outer_xid xid8
-);
-
-DROP FUNCTION citus_internal.execute_command_on_remote_nodes_as_user(
-    query text,
-    username text
-);
-
-DROP FUNCTION citus_internal.mark_object_distributed(
-    classId Oid, objectName text, objectId Oid, connectionUser text
-);
-
 DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid,oid,int,boolean);
 #include "../udfs/citus_unmark_object_distributed/10.0-1.sql"
 
-DROP FUNCTION citus_internal.commit_management_command_2pc();
-
 ALTER TABLE pg_catalog.pg_dist_transaction DROP COLUMN outer_xid;
 
 REVOKE USAGE ON SCHEMA citus_internal FROM PUBLIC;

View File

@@ -1,7 +0,0 @@
CREATE OR REPLACE FUNCTION citus_internal.execute_command_on_remote_nodes_as_user(query text, username text)
RETURNS VOID
LANGUAGE C
AS 'MODULE_PATHNAME', $$execute_command_on_remote_nodes_as_user$$;

COMMENT ON FUNCTION citus_internal.execute_command_on_remote_nodes_as_user(query text, username text)
IS 'executes a query on the nodes other than the current one';

View File

@@ -1,7 +0,0 @@
CREATE OR REPLACE FUNCTION citus_internal.execute_command_on_remote_nodes_as_user(query text, username text)
RETURNS VOID
LANGUAGE C
AS 'MODULE_PATHNAME', $$execute_command_on_remote_nodes_as_user$$;

COMMENT ON FUNCTION citus_internal.execute_command_on_remote_nodes_as_user(query text, username text)
IS 'executes a query on the nodes other than the current one';

View File

@@ -1,7 +0,0 @@
CREATE OR REPLACE FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text)
RETURNS VOID
LANGUAGE C
AS 'MODULE_PATHNAME', $$mark_object_distributed$$;

COMMENT ON FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text)
IS 'adds an object to pg_dist_object on all nodes';

View File

@@ -1,7 +0,0 @@
CREATE OR REPLACE FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text)
RETURNS VOID
LANGUAGE C
AS 'MODULE_PATHNAME', $$mark_object_distributed$$;

COMMENT ON FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text)
IS 'adds an object to pg_dist_object on all nodes';

View File

@@ -1,7 +0,0 @@
CREATE OR REPLACE FUNCTION citus_internal.start_management_transaction(outer_xid xid8)
RETURNS VOID
LANGUAGE C
AS 'MODULE_PATHNAME', $$start_management_transaction$$;

COMMENT ON FUNCTION citus_internal.start_management_transaction(outer_xid xid8)
IS 'internal Citus function that starts a management transaction in the main database';

View File

@@ -1,7 +0,0 @@
CREATE OR REPLACE FUNCTION citus_internal.start_management_transaction(outer_xid xid8)
RETURNS VOID
LANGUAGE C
AS 'MODULE_PATHNAME', $$start_management_transaction$$;

COMMENT ON FUNCTION citus_internal.start_management_transaction(outer_xid xid8)
IS 'internal Citus function that starts a management transaction in the main database';

View File

@@ -190,6 +190,9 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS)
 
 /*
  * override_backend_data_gpid is a wrapper around SetBackendDataGpid().
+ * Also sets distributedCommandOriginator to true since the only caller of
+ * this method calls this function actually wants this backend to
+ * be treated as a distributed command originator with the given global pid.
  */
 Datum
 override_backend_data_gpid(PG_FUNCTION_ARGS)
@@ -199,6 +202,7 @@ override_backend_data_gpid(PG_FUNCTION_ARGS)
 	uint64 gpid = PG_GETARG_INT64(0);
 
 	SetBackendDataGlobalPID(gpid);
+	SetBackendDataDistributedCommandOriginator(true);
 
 	PG_RETURN_VOID();
 }

View File

@@ -855,6 +855,16 @@ GetCurrentDistributedTransactionId(void)
 void
 AssignDistributedTransactionId(void)
 {
+	/*
+	 * MyBackendData should always be available. However, we observed some
+	 * crashes where certain hooks were not executed.
+	 * Bug 3697586: Server crashes when assigning distributed transaction
+	 */
+	if (!MyBackendData)
+	{
+		ereport(ERROR, (errmsg("backend is not ready for distributed transactions")));
+	}
+
 	pg_atomic_uint64 *transactionNumberSequence =
 		&backendManagementShmemData->nextTransactionNumber;
 
@@ -964,6 +974,23 @@ SetBackendDataGlobalPID(uint64 gpid)
 }
 
 
+/*
+ * SetBackendDataDistributedCommandOriginator sets the distributedCommandOriginator
+ * field on MyBackendData.
+ */
+void
+SetBackendDataDistributedCommandOriginator(bool distributedCommandOriginator)
+{
+	if (!MyBackendData)
+	{
+		return;
+	}
+	SpinLockAcquire(&MyBackendData->mutex);
+	MyBackendData->distributedCommandOriginator = distributedCommandOriginator;
+	SpinLockRelease(&MyBackendData->mutex);
+}
+
+
 /*
  * GetGlobalPID returns the global process id of the current backend.
  */

View File

@@ -61,6 +61,7 @@ extern void AssignGlobalPID(const char *applicationName);
 extern uint64 GetGlobalPID(void);
 extern void SetBackendDataDatabaseId(void);
 extern void SetBackendDataGlobalPID(uint64 gpid);
+extern void SetBackendDataDistributedCommandOriginator(bool distributedCommandOriginator);
 extern uint64 ExtractGlobalPID(const char *applicationName);
 extern int ExtractNodeIdFromGlobalPID(uint64 globalPID, bool missingOk);
 extern int ExtractProcessIdFromGlobalPID(uint64 globalPID);

View File

@@ -1,74 +0,0 @@
# This test checks that once citus.main_db is set and the
# server is restarted. A Citus Maintenance Daemon for the main_db
# is launched. This should happen even if there is no query run
# in main_db yet.
import time


def wait_until_maintenance_deamons_start(deamoncount, cluster):
    i = 0
    n = 0

    while i < 10:
        i += 1

        n = cluster.coordinator.sql_value(
            "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';"
        )

        if n == deamoncount:
            break

        time.sleep(0.1)

    assert n == deamoncount


def test_set_maindb(cluster_factory):
    cluster = cluster_factory(0)

    # Test that once citus.main_db is set to a database name
    # there are two maintenance deamons running upon restart.
    # One maintenance deamon for the database of the current connection
    # and one for the citus.main_db.
    cluster.coordinator.create_database("mymaindb")
    cluster.coordinator.configure("citus.main_db='mymaindb'")
    cluster.coordinator.restart()

    assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "mymaindb"

    wait_until_maintenance_deamons_start(2, cluster)

    assert (
        cluster.coordinator.sql_value(
            "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname='mymaindb';"
        )
        == 1
    )

    # Test that once citus.main_db is set to empty string
    # there is only one maintenance deamon for the database
    # of the current connection.
    cluster.coordinator.configure("citus.main_db=''")
    cluster.coordinator.restart()

    assert cluster.coordinator.sql_value("SHOW citus.main_db;") == ""

    wait_until_maintenance_deamons_start(1, cluster)

    # Test that after citus.main_db is dropped. The maintenance
    # deamon for this database is terminated.
    cluster.coordinator.configure("citus.main_db='mymaindb'")
    cluster.coordinator.restart()

    assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "mymaindb"

    wait_until_maintenance_deamons_start(2, cluster)

    cluster.coordinator.sql("DROP DATABASE mymaindb;")

    wait_until_maintenance_deamons_start(1, cluster)

    assert (
        cluster.coordinator.sql_value(
            "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname='mymaindb';"
        )
        == 0
    )

View File

@@ -1,198 +0,0 @@
def test_main_commited_outer_not_yet(cluster):
    c = cluster.coordinator
    w0 = cluster.workers[0]

    # create a non-main database
    c.sql("CREATE DATABASE db1")

    # we will use cur1 to simulate non-main database user and
    # cur2 to manually do the steps we would do in the main database
    with c.cur(dbname="db1") as cur1, c.cur() as cur2:
        # let's start a transaction and find its transaction id
        cur1.execute("BEGIN")
        cur1.execute("SELECT txid_current()")
        txid = cur1.fetchall()

        # using the transaction id of the cur1 simulate the main database commands manually
        cur2.execute("BEGIN")
        cur2.execute(
            "SELECT citus_internal.start_management_transaction(%s)", (str(txid[0][0]),)
        )
        cur2.execute(
            "SELECT citus_internal.execute_command_on_remote_nodes_as_user('CREATE USER u1;', 'postgres')"
        )
        cur2.execute(
            "SELECT citus_internal.mark_object_distributed(1260, 'u1', 123123, 'postgres')"
        )
        cur2.execute("COMMIT")

        # run the transaction recovery
        c.sql("SELECT recover_prepared_transactions()")

        # user should not be created on the worker because outer transaction is not committed yet
        role_before_commit = w0.sql_value(
            "SELECT count(*) FROM pg_roles WHERE rolname = 'u1'"
        )

        assert (
            int(role_before_commit) == 0
        ), "role is in pg_dist_object despite not committing"

        # user should not be in pg_dist_object on the coordinator because outer transaction is not committed yet
        pdo_coordinator_before_commit = c.sql_value(
            "SELECT count(*) FROM pg_dist_object WHERE objid = 123123"
        )

        assert (
            int(pdo_coordinator_before_commit) == 0
        ), "role is in pg_dist_object on coordinator despite not committing"

        # user should not be in pg_dist_object on the worker because outer transaction is not committed yet
        pdo_worker_before_commit = w0.sql_value(
            "SELECT count(*) FROM pg_dist_object WHERE objid::regrole::text = 'u1'"
        )

        assert (
            int(pdo_worker_before_commit) == 0
        ), "role is in pg_dist_object on worker despite not committing"

        # commit in cur1 so the transaction recovery thinks this is a successful transaction
        cur1.execute("COMMIT")

        # run the transaction recovery again after committing
        c.sql("SELECT recover_prepared_transactions()")

        # check that the user is created by the transaction recovery on the worker
        role_after_commit = w0.sql_value(
            "SELECT count(*) FROM pg_roles WHERE rolname = 'u1'"
        )

        assert (
            int(role_after_commit) == 1
        ), "role is not created during recovery despite committing"

        # check that the user is in pg_dist_object on the coordinator after transaction recovery
        pdo_coordinator_after_commit = c.sql_value(
            "SELECT count(*) FROM pg_dist_object WHERE objid = 123123"
        )

        assert (
            int(pdo_coordinator_after_commit) == 1
        ), "role is not in pg_dist_object on coordinator after recovery despite committing"

        # check that the user is in pg_dist_object on the worker after transaction recovery
        pdo_worker_after_commit = w0.sql_value(
            "SELECT count(*) FROM pg_dist_object WHERE objid::regrole::text = 'u1'"
        )

        assert (
            int(pdo_worker_after_commit) == 1
        ), "role is not in pg_dist_object on worker after recovery despite committing"

    c.sql("DROP DATABASE db1")
    c.sql(
        "SELECT citus_internal.execute_command_on_remote_nodes_as_user('DROP USER u1', 'postgres')"
    )
    c.sql(
        """
        SELECT run_command_on_workers($$
            DELETE FROM pg_dist_object
            WHERE objid::regrole::text = 'u1'
        $$)
        """
    )
    c.sql(
        """
        DELETE FROM pg_dist_object
        WHERE objid = 123123
        """
    )


def test_main_commited_outer_aborted(cluster):
    c = cluster.coordinator
    w0 = cluster.workers[0]

    # create a non-main database
    c.sql("CREATE DATABASE db2")

    # we will use cur1 to simulate non-main database user and
    # cur2 to manually do the steps we would do in the main database
    with c.cur(dbname="db2") as cur1, c.cur() as cur2:
        # let's start a transaction and find its transaction id
        cur1.execute("BEGIN")
        cur1.execute("SELECT txid_current()")
        txid = cur1.fetchall()

        # using the transaction id of the cur1 simulate the main database commands manually
        cur2.execute("BEGIN")
        cur2.execute(
            "SELECT citus_internal.start_management_transaction(%s)", (str(txid[0][0]),)
        )
        cur2.execute(
            "SELECT citus_internal.execute_command_on_remote_nodes_as_user('CREATE USER u2;', 'postgres')"
        )
        cur2.execute(
            "SELECT citus_internal.mark_object_distributed(1260, 'u2', 321321, 'postgres')"
        )
        cur2.execute("COMMIT")

        # abort cur1 so the transaction recovery thinks this is an aborted transaction
        cur1.execute("ABORT")

        # check that the user is not yet created on the worker
        role_before_recovery = w0.sql_value(
            "SELECT count(*) FROM pg_roles WHERE rolname = 'u2'"
        )

        assert int(role_before_recovery) == 0, "role is already created before recovery"

        # check that the user is not in pg_dist_object on the coordinator
        pdo_coordinator_before_recovery = c.sql_value(
            "SELECT count(*) FROM pg_dist_object WHERE objid = 321321"
        )

        assert (
            int(pdo_coordinator_before_recovery) == 0
        ), "role is already in pg_dist_object on coordinator before recovery"

        # check that the user is not in pg_dist_object on the worker
        pdo_worker_before_recovery = w0.sql_value(
            "SELECT count(*) FROM pg_dist_object WHERE objid::regrole::text = 'u2'"
        )

        assert (
            int(pdo_worker_before_recovery) == 0
        ), "role is already in pg_dist_object on worker before recovery"

        # run the transaction recovery
        c.sql("SELECT recover_prepared_transactions()")

        # check that the user is not created by the transaction recovery on the worker
        role_after_recovery = w0.sql_value(
            "SELECT count(*) FROM pg_roles WHERE rolname = 'u2'"
        )

        assert (
            int(role_after_recovery) == 0
        ), "role is created during recovery despite aborting"

        # check that the user is not in pg_dist_object on the coordinator after transaction recovery
        pdo_coordinator_after_recovery = c.sql_value(
            "SELECT count(*) FROM pg_dist_object WHERE objid = 321321"
        )

        assert (
            int(pdo_coordinator_after_recovery) == 0
        ), "role is in pg_dist_object on coordinator after recovery despite aborting"

        # check that the user is not in pg_dist_object on the worker after transaction recovery
        pdo_worker_after_recovery = w0.sql_value(
            "SELECT count(*) FROM pg_dist_object WHERE objid::regrole::text = 'u2'"
        )

        assert (
            int(pdo_worker_after_recovery) == 0
        ), "role is in pg_dist_object on worker after recovery despite aborting"

    c.sql("DROP DATABASE db2")

View File

@@ -62,10 +62,16 @@ def run_citus_upgrade_tests(config, before_upgrade_schedule, after_upgrade_sched
 
     install_citus(config.post_tar_path)
 
+    # disable 2pc recovery for all nodes to work around https://github.com/citusdata/citus/issues/7875
+    disable_2pc_recovery_for_all_nodes(config.bindir, config)
+
     restart_databases(config.bindir, config.datadir, config.mixed_mode, config)
     run_alter_citus(config.bindir, config.mixed_mode, config)
     verify_upgrade(config, config.mixed_mode, config.node_name_to_ports.values())
 
+    # re-enable 2pc recovery for all nodes
+    enable_2pc_recovery_for_all_nodes(config.bindir, config)
+
     run_test_on_coordinator(config, after_upgrade_schedule)
     remove_citus(config.post_tar_path)
 
@@ -146,6 +152,18 @@ def restart_database(pg_path, abs_data_path, node_name, node_ports, logfile_pref
     subprocess.run(command, check=True)
 
 
+def disable_2pc_recovery_for_all_nodes(pg_path, config):
+    for port in config.node_name_to_ports.values():
+        utils.psql(pg_path, port, "ALTER SYSTEM SET citus.recover_2pc_interval TO -1;")
+        utils.psql(pg_path, port, "SELECT pg_reload_conf();")
+
+
+def enable_2pc_recovery_for_all_nodes(pg_path, config):
+    for port in config.node_name_to_ports.values():
+        utils.psql(pg_path, port, "ALTER SYSTEM RESET citus.recover_2pc_interval;")
+        utils.psql(pg_path, port, "SELECT pg_reload_conf();")
+
+
 def run_alter_citus(pg_path, mixed_mode, config):
     for port in config.node_name_to_ports.values():
         if mixed_mode and port in (

View File

@@ -2,8 +2,6 @@
 CREATE USER nonsuperuser CREATEROLE;
 SET ROLE nonsuperuser;
 --- The non-superuser role should not be able to access citus_internal functions
-SELECT citus_internal.commit_management_command_2pc();
-ERROR: permission denied for function commit_management_command_2pc
 SELECT citus_internal.replace_isolation_tester_func();
 ERROR: permission denied for function replace_isolation_tester_func
 RESET ROLE;

View File

@@ -78,11 +78,5 @@ SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER B
  worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
 (3 rows)
 
-\c test_locale_provider - - :worker_2_port
-set citus.enable_create_database_propagation to on;
-create database unsupported_option_from_non_main_db with oid = 12345;
-ERROR: CREATE DATABASE option "oid" is not supported
-\c regression - - :master_port
-set citus.enable_create_database_propagation to on;
 drop database test_locale_provider;
 \c - - - :master_port

View File

@@ -0,0 +1,248 @@
--- Test for verifying that column references (var nodes) in targets that cannot be pushed down
--- do not cause issues for the postgres planner, in particular postgres versions 16+, where the
--- varnullingrels field of a VAR node may contain relids of join relations that can make the var
--- NULL; in a rewritten distributed query without a join such relids do not have a meaning.
--- Issue #7705: [SEGFAULT] Querying distributed tables with window partition causes segmentation fault
--- https://github.com/citusdata/citus/issues/7705
CREATE SCHEMA issue_7705;
SET search_path to 'issue_7705';
SET citus.next_shard_id TO 30070000;
SET citus.shard_replication_factor TO 1;
SET citus.enable_local_execution TO ON;
CREATE TABLE t1 (id INT PRIMARY KEY);
INSERT INTO t1 VALUES (1), (2);
CREATE TABLE t2 (id INT, account_id INT, a2 INT, PRIMARY KEY(id, account_id));
INSERT INTO t2 VALUES (3, 1, 10), (4, 2, 20), (5, 1, NULL);
SELECT create_distributed_table('t1', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$issue_7705.t1$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('t2', 'account_id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$issue_7705.t2$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- Test the issue seen in #7705; a target expression with
-- a window function that cannot be pushed down because the
-- partion by is not on the distribution column also includes
-- a column from the inner side of a left outer join, which
-- produces a non-empty varnullingrels set in PG 16 (and higher)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
id | max
---------------------------------------------------------------------
1 | 10
2 | 20
1 |
(3 rows)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
QUERY PLAN
---------------------------------------------------------------------
WindowAgg
Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3
-> Sort
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on issue_7705.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on issue_7705.t1_30070000 t1
Output: t1.id
(22 rows)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id;
id | max
---------------------------------------------------------------------
1 | 10
2 | 20
1 |
(3 rows)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id;
QUERY PLAN
---------------------------------------------------------------------
WindowAgg
Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3
-> Sort
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (issue_7705.t2_30070004 t2 RIGHT JOIN issue_7705.t1_30070000 t1 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on issue_7705.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on issue_7705.t1_30070000 t1
Output: t1.id
(22 rows)
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
id | max
---------------------------------------------------------------------
1 |
1 | 10
2 | 20
(3 rows)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Output: remote_scan.id, (max(remote_scan.max) OVER (?)), remote_scan.worker_column_3
Group Key: remote_scan.id, max(remote_scan.max) OVER (?)
-> WindowAgg
Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3
-> Sort
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Sort Key: remote_scan.worker_column_3
-> Custom Scan (Citus Adaptive)
Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2, t2.id
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on issue_7705.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on issue_7705.t1_30070000 t1
Output: t1.id
(25 rows)
CREATE SEQUENCE test_seq START 101;
CREATE OR REPLACE FUNCTION TEST_F(int) returns INT language sql stable as $$ select $1 + 42; $$ ;
-- Issue #7705 also occurs if a target expression includes a column
-- of a distributed table that is on the inner side of a left outer
-- join and a call to nextval(), because nextval() cannot be pushed
-- down, and must be run on the coordinator
SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
id | test_f
---------------------------------------------------------------------
1 | 153
1 |
2 | 165
(3 rows)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
QUERY PLAN
---------------------------------------------------------------------
Result
Output: remote_scan.id, ((remote_scan.test_f + (nextval('test_seq'::regclass))::integer) + 42)
-> Sort
Output: remote_scan.id, remote_scan.test_f
Sort Key: remote_scan.id
-> Custom Scan (Citus Adaptive)
Output: remote_scan.id, remote_scan.test_f
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS test_f FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on issue_7705.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on issue_7705.t1_30070000 t1
Output: t1.id
(22 rows)
SELECT t1.id, CASE nextval('test_seq') % 2 = 0 WHEN true THEN t2.a2 ELSE 1 END
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
id | case
---------------------------------------------------------------------
1 | 10
1 | 1
2 | 20
(3 rows)
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, CASE nextval('test_seq') %2 = 0 WHEN true THEN t2.a2 ELSE 1 END
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
QUERY PLAN
---------------------------------------------------------------------
Result
Output: remote_scan.id, CASE ((nextval('test_seq'::regclass) % '2'::bigint) = 0) WHEN CASE_TEST_EXPR THEN remote_scan."case" ELSE 1 END
-> Sort
Output: remote_scan.id, remote_scan."case"
Sort Key: remote_scan.id
-> Custom Scan (Citus Adaptive)
Output: remote_scan.id, remote_scan."case"
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS id, worker_column_2 AS "case" FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery
Node: host=localhost port=xxxxx dbname=regression
-> Hash Right Join
Output: t1.id, t2.a2
Inner Unique: true
Hash Cond: (t2.account_id = t1.id)
-> Seq Scan on issue_7705.t2_30070004 t2
Output: t2.id, t2.account_id, t2.a2
-> Hash
Output: t1.id
-> Seq Scan on issue_7705.t1_30070000 t1
Output: t1.id
(22 rows)
--- cleanup
\set VERBOSITY TERSE
DROP SCHEMA issue_7705 CASCADE;
NOTICE: drop cascades to 4 other objects
RESET all;

View File

@@ -1431,20 +1431,16 @@ SELECT * FROM multi_extension.print_extension_changes();
 | function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) void
 | function citus_internal.add_tenant_schema(oid,integer) void
 | function citus_internal.adjust_local_clock_to_remote(cluster_clock) void
-| function citus_internal.commit_management_command_2pc() void
 | function citus_internal.database_command(text) void
 | function citus_internal.delete_colocation_metadata(integer) void
 | function citus_internal.delete_partition_metadata(regclass) void
 | function citus_internal.delete_placement_metadata(bigint) void
 | function citus_internal.delete_shard_metadata(bigint) void
 | function citus_internal.delete_tenant_schema(oid) void
-| function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void
 | function citus_internal.global_blocked_processes() SETOF record
 | function citus_internal.is_replication_origin_tracking_active() boolean
 | function citus_internal.local_blocked_processes() SETOF record
 | function citus_internal.mark_node_not_synced(integer,integer) void
-| function citus_internal.mark_object_distributed(oid,text,oid,text) void
-| function citus_internal.start_management_transaction(xid8) void
 | function citus_internal.start_replication_origin_tracking() void
 | function citus_internal.stop_replication_origin_tracking() void
 | function citus_internal.unregister_tenant_schema_globally(oid,text) void
@@ -1452,7 +1448,7 @@ SELECT * FROM multi_extension.print_extension_changes();
 | function citus_internal.update_placement_metadata(bigint,integer,integer) void
 | function citus_internal.update_relation_colocation(oid,integer) void
 | function citus_unmark_object_distributed(oid,oid,integer,boolean) void
-(30 rows)
+(26 rows)
 
 DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
 -- show running version

View File

@@ -81,29 +81,9 @@ SELECT create_distributed_table('tenk1', 'unique1');
 (1 row)
 
 SET citus.log_remote_commands TO on;
-EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
-NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing SAVEPOINT citus_explain_savepoint
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing EXPLAIN (ANALYZE FALSE, VERBOSE FALSE, COSTS TRUE, BUFFERS FALSE, WAL FALSE, GENERIC_PLAN TRUE, TIMING FALSE, SUMMARY FALSE, FORMAT TEXT) SELECT unique1 FROM pg16.tenk1_950001 tenk1 WHERE (thousand OPERATOR(pg_catalog.=) 1000)
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing ROLLBACK TO SAVEPOINT citus_explain_savepoint
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing COMMIT
-DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-                          QUERY PLAN
----------------------------------------------------------------------
- Custom Scan (Citus Adaptive)  (cost=0.00..0.00 rows=0 width=0)
-   Task Count: 1
-   Tasks Shown: All
-   ->  Task
-         Node: host=localhost port=xxxxx dbname=regression
-         ->  Seq Scan on tenk1_950001 tenk1  (cost=0.00..35.50 rows=10 width=4)
-               Filter: (thousand = 1000)
-(7 rows)
-
-EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
+EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = $1;
+ERROR: EXPLAIN GENERIC_PLAN is currently not supported for Citus tables
+EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = $1;
 ERROR: EXPLAIN options ANALYZE and GENERIC_PLAN cannot be used together
 SET citus.log_remote_commands TO off;
 -- Proper error when creating statistics without a name on a Citus table
@@ -5,6 +5,37 @@ SELECT master_remove_node('localhost', :master_port);
 (1 row)
+-- to silence -potentially flaky- "could not establish connection after" warnings in the test below
+SET client_min_messages TO ERROR;
+-- to fail fast when the hostname is not resolvable, as will be the case below
+SET citus.node_connection_timeout to '1s';
+BEGIN;
+SET application_name TO 'new_app_name';
+-- that should fail because of bad hostname & port
+SELECT citus_add_node('200.200.200.200', 1, 200);
+ERROR: connection to the remote node postgres@200.200.200.200:1 failed
+-- Since the above command failed, Postgres will need to revert the
+-- application_name change made in this transaction and this will
+-- happen within the abort-transaction callback, so we won't be in a
+-- transaction block while Postgres does that.
+--
+-- And when the application_name changes, Citus tries to re-assign
+-- the global pid but it does so only for Citus internal backends,
+-- and doing so for Citus internal backends doesn't require being
+-- in a transaction block and is safe.
+--
+-- However, for client external backends (like us here), Citus
+-- doesn't re-assign the global pid because it's not needed and it's
+-- not safe to do so outside of a transaction block. This is because
+-- it would require performing a catalog access to retrieve the local
+-- node id when the cached local node is invalidated, like what just
+-- happened here because of the failed citus_add_node() call made
+-- above.
+--
+-- So by failing here (rather than crashing), we ensure this behavior.
+ROLLBACK;
+RESET client_min_messages;
+RESET citus.node_connection_timeout;
 -- restore coordinator for the rest of the tests
 SELECT citus_set_coordinator_host('localhost', :master_port);
 citus_set_coordinator_host
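(Aside, not part of the diff: the global pid that the new comment block describes can be observed with the citus_backend_gpid() UDF; a minimal sketch, assuming a connected Citus session:)
SELECT citus_backend_gpid();
SET application_name TO 'some_client_name';
-- for a regular client backend, the gpid is expected to stay the same
SELECT citus_backend_gpid();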
@@ -1,3 +1,5 @@
+BEGIN;
+SET LOCAL citus.show_shards_for_app_name_prefixes = '';
 -- The following query retrieves the foreign key constraints of the table "pg_dist_background_job"
 -- along with their details. This modification includes a fix for a null pointer exception that occurred
 -- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604.
@@ -31,3 +33,4 @@ order by
 pg_dist_background_task_depend_job_id_fkey | job_id | pg_dist_background_job | pg_catalog
 (2 rows)
+END;
@@ -64,27 +64,23 @@ ORDER BY 1;
 function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text)
 function citus_internal.add_tenant_schema(oid,integer)
 function citus_internal.adjust_local_clock_to_remote(cluster_clock)
-function citus_internal.commit_management_command_2pc()
 function citus_internal.database_command(text)
 function citus_internal.delete_colocation_metadata(integer)
 function citus_internal.delete_partition_metadata(regclass)
 function citus_internal.delete_placement_metadata(bigint)
 function citus_internal.delete_shard_metadata(bigint)
 function citus_internal.delete_tenant_schema(oid)
-function citus_internal.execute_command_on_remote_nodes_as_user(text,text)
 function citus_internal.find_groupid_for_node(text,integer)
 function citus_internal.global_blocked_processes()
 function citus_internal.is_replication_origin_tracking_active()
 function citus_internal.local_blocked_processes()
 function citus_internal.mark_node_not_synced(integer,integer)
-function citus_internal.mark_object_distributed(oid,text,oid,text)
 function citus_internal.pg_dist_node_trigger_func()
 function citus_internal.pg_dist_rebalance_strategy_trigger_func()
 function citus_internal.pg_dist_shard_placement_trigger_func()
 function citus_internal.refresh_isolation_tester_prepared_statement()
 function citus_internal.replace_isolation_tester_func()
 function citus_internal.restore_isolation_tester_func()
-function citus_internal.start_management_transaction(xid8)
 function citus_internal.start_replication_origin_tracking()
 function citus_internal.stop_replication_origin_tracking()
 function citus_internal.unregister_tenant_schema_globally(oid,text)
@@ -371,5 +367,5 @@ ORDER BY 1;
 view citus_stat_tenants_local
 view pg_dist_shard_placement
 view time_partitions
-(361 rows)
+(357 rows)
@@ -28,3 +28,12 @@ SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 1 orphaned resources
 DROP TABLE table_with_orphaned_shards;
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
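(Aside, not part of the diff: after the ALTER SYSTEM RESET and pg_reload_conf() above, the effective setting can be double-checked with a plain SHOW:)
SHOW citus.defer_shard_delete_interval;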
@@ -30,6 +30,23 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELE
 (1 row)
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+SELECT pg_sleep(0.1);
+ pg_sleep
+---------------------------------------------------------------------
+
+(1 row)
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement
@@ -34,7 +34,6 @@ test: failure_multi_row_insert
 test: failure_mx_metadata_sync
 test: failure_mx_metadata_sync_multi_trans
 test: failure_connection_establishment
-test: failure_non_main_db_2pc
 test: failure_create_database
 # this test syncs metadata to the workers
@@ -40,7 +40,6 @@ test: create_drop_database_propagation_pg15
 test: create_drop_database_propagation_pg16
 test: comment_on_database
 test: comment_on_role
-test: metadata_sync_from_non_maindb
 # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential
 test: single_shard_table_udfs
 test: schema_based_sharding
@@ -58,7 +57,7 @@ test: multi_metadata_attributes
 test: multi_read_from_secondaries
-test: grant_on_database_propagation grant_on_database_propagation_from_non_maindb
+test: grant_on_database_propagation
 test: alter_database_propagation
 test: citus_shards
@@ -103,12 +103,11 @@ test: multi_dropped_column_aliases foreign_key_restriction_enforcement
 test: binary_protocol
 test: alter_table_set_access_method
 test: alter_distributed_table
-test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 issue_7477
+test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 issue_7477 issue_7705
 test: object_propagation_debug
 test: undistribute_table
 test: run_command_on_all_nodes
 test: background_task_queue_monitor
-test: other_databases grant_role_from_non_maindb role_operations_from_non_maindb seclabel_non_maindb
 test: citus_internal_access
 test: function_with_case_when
@@ -492,7 +492,6 @@ push(@pgOptions, "citus.stat_statements_track = 'all'");
 push(@pgOptions, "citus.enable_change_data_capture=on");
 push(@pgOptions, "citus.stat_tenants_limit = 2");
 push(@pgOptions, "citus.stat_tenants_track = 'ALL'");
-push(@pgOptions, "citus.main_db = 'regression'");
 push(@pgOptions, "citus.superuser = 'postgres'");
 # Some tests look at shards in pg_class, make sure we can usually see them:
@@ -3,7 +3,6 @@ CREATE USER nonsuperuser CREATEROLE;
 SET ROLE nonsuperuser;
 --- The non-superuser role should not be able to access citus_internal functions
-SELECT citus_internal.commit_management_command_2pc();
 SELECT citus_internal.replace_isolation_tester_func();
 RESET ROLE;
@@ -60,14 +60,6 @@ CREATE DATABASE test_locale_provider
 SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER BY node_type;
-\c test_locale_provider - - :worker_2_port
-set citus.enable_create_database_propagation to on;
-create database unsupported_option_from_non_main_db with oid = 12345;
-\c regression - - :master_port
-set citus.enable_create_database_propagation to on;
 drop database test_locale_provider;
 \c - - - :master_port
@@ -1,75 +0,0 @@
SELECT citus.mitmproxy('conn.allow()');
CREATE SCHEMA failure_non_main_db_2pc;
SET SEARCH_PATH TO 'failure_non_main_db_2pc';
CREATE DATABASE other_db1;
SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()');
\c other_db1
CREATE USER user_1;
\c regression
SELECT citus.mitmproxy('conn.allow()');
SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_1'$$) ORDER BY 1;
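-- (illustrative aside, not part of the original test) before the
-- recover_prepared_transactions() call below, the dangling prepared
-- transaction left behind by the killed COMMIT PREPARED is visible in:
SELECT gid FROM pg_prepared_xacts;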
SELECT recover_prepared_transactions();
SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_1'$$) ORDER BY 1;
SELECT citus.mitmproxy('conn.onQuery(query="CREATE USER user_2").kill()');
\c other_db1
CREATE USER user_2;
\c regression
SELECT citus.mitmproxy('conn.allow()');
SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_2'$$) ORDER BY 1;
SELECT recover_prepared_transactions();
SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_2'$$) ORDER BY 1;
DROP DATABASE other_db1;
-- user_2 should not exist because the query to create it will fail
-- but let's make sure we try to drop it just in case
DROP USER IF EXISTS user_1, user_2;
SELECT citus_set_coordinator_host('localhost');
\c - - - :worker_1_port
CREATE DATABASE other_db2;
SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()');
\c other_db2
CREATE USER user_3;
\c regression
SELECT citus.mitmproxy('conn.allow()');
SELECT result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_3'$$) ORDER BY 1;
SELECT recover_prepared_transactions();
SELECT result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_3'$$) ORDER BY 1;
DROP DATABASE other_db2;
DROP USER user_3;
\c - - - :master_port
SELECT result FROM run_command_on_all_nodes($$DELETE FROM pg_dist_node WHERE groupid = 0$$);
DROP SCHEMA failure_non_main_db_2pc;
@@ -1,246 +0,0 @@
-- Public role has connect,temp,temporary privileges on database
-- To test these scenarios, we need to revoke these privileges from public role
-- since public role privileges are inherited by new roles/users
set citus.enable_create_database_propagation to on;
create database test_2pc_db;
show citus.main_db;
revoke connect,temp,temporary on database test_2pc_db from public;
CREATE SCHEMA grant_on_database_propagation_non_maindb;
SET search_path TO grant_on_database_propagation_non_maindb;
-- test grant/revoke CREATE privilege propagation on database
create user "myuser'_test";
\c test_2pc_db - - :master_port
grant create on database test_2pc_db to "myuser'_test";
\c regression - - :master_port;
select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']);
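-- (illustrative aside, not part of the original test) check_database_privileges()
-- is a helper defined elsewhere in the regression suite; conceptually it runs
-- a probe like the following on every node:
SELECT has_database_privilege('myuser''_test', 'test_2pc_db', 'CREATE');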
\c test_2pc_db - - :master_port
revoke create on database test_2pc_db from "myuser'_test";
\c regression - - :master_port;
select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']);
drop user "myuser'_test";
-----------------------------------------------------------------------
-- test grant/revoke CONNECT privilege propagation on database
\c regression - - :master_port
create user myuser2;
\c test_2pc_db - - :master_port
grant CONNECT on database test_2pc_db to myuser2;
\c regression - - :master_port;
select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']);
\c test_2pc_db - - :master_port
revoke connect on database test_2pc_db from myuser2;
\c regression - - :master_port
select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']);
drop user myuser2;
-----------------------------------------------------------------------
-- test grant/revoke TEMP privilege propagation on database
\c regression - - :master_port
create user myuser3;
-- test grant/revoke temp on database
\c test_2pc_db - - :master_port
grant TEMP on database test_2pc_db to myuser3;
\c regression - - :master_port;
select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']);
\c test_2pc_db - - :worker_1_port
revoke TEMP on database test_2pc_db from myuser3;
\c regression - - :master_port;
select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']);
drop user myuser3;
-----------------------------------------------------------------------
\c regression - - :master_port
-- test temporary privilege on database
create user myuser4;
-- test grant/revoke temporary on database
\c test_2pc_db - - :worker_1_port
grant TEMPORARY on database test_2pc_db to myuser4;
\c regression - - :master_port
select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']);
\c test_2pc_db - - :master_port
revoke TEMPORARY on database test_2pc_db from myuser4;
\c regression - - :master_port;
select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']);
drop user myuser4;
-----------------------------------------------------------------------
-- test ALL privileges with ALL statement on database
create user myuser5;
grant ALL on database test_2pc_db to myuser5;
\c regression - - :master_port
select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
\c test_2pc_db - - :master_port
revoke ALL on database test_2pc_db from myuser5;
\c regression - - :master_port
select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
drop user myuser5;
-----------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database
create user myuser6;
\c test_2pc_db - - :master_port
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6;
\c regression - - :master_port
select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
\c test_2pc_db - - :master_port
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6;
\c regression - - :master_port
select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
drop user myuser6;
-----------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option
create user myuser7;
create user myuser_1;
\c test_2pc_db - - :master_port
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7;
set role myuser7;
--here, since myuser7 does not have the grant option, it should fail
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1;
\c regression - - :master_port
select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
\c test_2pc_db - - :master_port
RESET ROLE;
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option;
set role myuser7;
--here, since myuser7 has the grant option, it should succeed
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7;
\c regression - - :master_port
select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
\c test_2pc_db - - :master_port
RESET ROLE;
--below test should fail and should throw an error since myuser_1 still has the dependent privileges
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict;
--below test should fail and should throw an error since myuser_1 still has the dependent privileges
revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict;
--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade
revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade;
--here we test whether myuser7 still has the privileges after the revoke grant option above
\c regression - - :master_port
select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
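-- (illustrative aside, not part of the original test) grant-option status can
-- also be probed directly by appending WITH GRANT OPTION to the privilege
-- string accepted by has_database_privilege():
SELECT has_database_privilege('myuser7', 'test_2pc_db', 'CREATE WITH GRANT OPTION');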
\c test_2pc_db - - :master_port
reset role;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1;
\c regression - - :master_port
drop user myuser_1;
drop user myuser7;
-----------------------------------------------------------------------
-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database multi database
-- and multi user
\c regression - - :master_port
create user myuser8;
create user myuser_2;
set citus.enable_create_database_propagation to on;
create database test_db;
revoke connect,temp,temporary on database test_db from public;
\c test_2pc_db - - :master_port
grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2;
\c regression - - :master_port
select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
\c test_2pc_db - - :master_port
RESET ROLE;
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ;
--below test should succeed and should not throw any error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2;
--below test should succeed and should not throw any error
revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade;
\c regression - - :master_port
select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
\c test_2pc_db - - :master_port
reset role;
\c regression - - :master_port
drop user myuser_2;
drop user myuser8;
set citus.enable_create_database_propagation to on;
drop database test_db;
---------------------------------------------------------------------------
-- roll back public role's database privileges to their original state
grant connect,temp,temporary on database test_2pc_db to public;
drop database test_2pc_db;
set citus.enable_create_database_propagation to off;
DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE;
reset citus.enable_create_database_propagation;
reset search_path;
---------------------------------------------------------------------------
@@ -1,147 +0,0 @@
CREATE SCHEMA grant_role2pc;
SET search_path TO grant_role2pc;
set citus.enable_create_database_propagation to on;
CREATE DATABASE grant_role2pc_db;
\c grant_role2pc_db
SHOW citus.main_db;
SET citus.superuser TO 'postgres';
CREATE USER grant_role2pc_user1;
CREATE USER grant_role2pc_user2;
CREATE USER grant_role2pc_user3;
CREATE USER grant_role2pc_user4;
CREATE USER grant_role2pc_user5;
CREATE USER grant_role2pc_user6;
CREATE USER grant_role2pc_user7;
\c grant_role2pc_db
--test with empty superuser
SET citus.superuser TO '';
grant grant_role2pc_user1 to grant_role2pc_user2;
SET citus.superuser TO 'postgres';
grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER;
\c regression
select result FROM run_command_on_all_nodes(
$$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text = 'grant_role2pc_user2'
order by member::regrole::text, roleid::regrole::text
) t
$$
);
\c grant_role2pc_db
--test grant under transactional context with multiple operations
BEGIN;
grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION;
grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3 ;
COMMIT;
BEGIN;
grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3;
grant grant_role2pc_user1 to grant_role2pc_user6;
ROLLBACK;
BEGIN;
grant grant_role2pc_user1 to grant_role2pc_user7;
SELECT 1/0;
commit;
\c regression
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7')
order by member::regrole::text, roleid::regrole::text
) t
$$);
\c grant_role2pc_db
grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3;
\c regression
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7')
order by member::regrole::text, roleid::regrole::text
) t
$$);
\c grant_role2pc_db
revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3;
--test revoke under transactional context with multiple operations
BEGIN;
revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3 ;
revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3;
COMMIT;
\c grant_role2pc_db - - :worker_1_port
BEGIN;
revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3;
revoke grant_role2pc_user1 from grant_role2pc_user3 cascade;
COMMIT;
\c regression
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7')
order by member::regrole::text, roleid::regrole::text
) t
$$);
\c grant_role2pc_db - - :worker_1_port
BEGIN;
grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION;
grant grant_role2pc_user1 to grant_role2pc_user6;
COMMIT;
\c regression - - :master_port
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('grant_role2pc_user5','grant_role2pc_user6')
order by member::regrole::text, roleid::regrole::text
) t
$$);
revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6;
--clean resources
DROP SCHEMA grant_role2pc;
set citus.enable_create_database_propagation to on;
DROP DATABASE grant_role2pc_db;
drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7;
drop user grant_role2pc_user1;
reset citus.enable_create_database_propagation;
@@ -0,0 +1,72 @@
--- Test for verifying that column references (var nodes) in targets that cannot be pushed down
--- do not cause issues for the postgres planner, in particular postgres versions 16+, where the
--- varnullingrels field of a VAR node may contain relids of join relations that can make the var
--- NULL; in a rewritten distributed query without a join such relids do not have a meaning.
--- Issue #7705: [SEGFAULT] Querying distributed tables with window partition causes segmentation fault
--- https://github.com/citusdata/citus/issues/7705
CREATE SCHEMA issue_7705;
SET search_path to 'issue_7705';
SET citus.next_shard_id TO 30070000;
SET citus.shard_replication_factor TO 1;
SET citus.enable_local_execution TO ON;
CREATE TABLE t1 (id INT PRIMARY KEY);
INSERT INTO t1 VALUES (1), (2);
CREATE TABLE t2 (id INT, account_id INT, a2 INT, PRIMARY KEY(id, account_id));
INSERT INTO t2 VALUES (3, 1, 10), (4, 2, 20), (5, 1, NULL);
SELECT create_distributed_table('t1', 'id');
SELECT create_distributed_table('t2', 'account_id');
-- Test the issue seen in #7705; a target expression with
-- a window function that cannot be pushed down because the
-- partition by is not on the distribution column also includes
-- a column from the inner side of a left outer join, which
-- produces a non-empty varnullingrels set in PG 16 (and higher)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id;
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
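-- (illustrative contrast, not part of the original test) when the window
-- function is instead partitioned by the distribution column, Citus can
-- typically push the whole window computation down to the workers:
SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.account_id)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;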
CREATE SEQUENCE test_seq START 101;
CREATE OR REPLACE FUNCTION TEST_F(int) returns INT language sql stable as $$ select $1 + 42; $$ ;
-- Issue #7705 also occurs if a target expression includes a column
-- of a distributed table that is on the inner side of a left outer
-- join and a call to nextval(), because nextval() cannot be pushed
-- down, and must be run on the coordinator
SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int)
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
SELECT t1.id, CASE nextval('test_seq') % 2 = 0 WHEN true THEN t2.a2 ELSE 1 END
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
SELECT t1.id, CASE nextval('test_seq') %2 = 0 WHEN true THEN t2.a2 ELSE 1 END
FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
ORDER BY t1.id;
--- cleanup
\set VERBOSITY TERSE
DROP SCHEMA issue_7705 CASCADE;
RESET all;
@@ -1,188 +0,0 @@
CREATE SCHEMA metadata_sync_2pc_schema;
SET search_path TO metadata_sync_2pc_schema;
set citus.enable_create_database_propagation to on;
CREATE DATABASE metadata_sync_2pc_db;
revoke connect,temp,temporary on database metadata_sync_2pc_db from public;
\c metadata_sync_2pc_db
SHOW citus.main_db;
CREATE USER "grant_role2pc'_user1";
CREATE USER "grant_role2pc'_user2";
CREATE USER "grant_role2pc'_user3";
CREATE USER grant_role2pc_user4;
CREATE USER grant_role2pc_user5;
\c regression
select 1 from citus_remove_node('localhost', :worker_2_port);
\c metadata_sync_2pc_db
grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION;
-- This section was originally testing a scenario where a user with the 'admin option' grants the same role to another user, also with the 'admin option'.
-- However, we encountered inconsistent errors because the 'admin option' grant is executed after the grant below.
-- Once we establish the correct order of granting, we will reintroduce the 'granted by' clause.
-- For now, we are commenting out the grant below that includes 'granted by', and instead, we are adding a grant without the 'granted by' clause.
-- grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3";
grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5;
--test for grant on database
\c metadata_sync_2pc_db - - :master_port
grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1";
grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2";
grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3";
\c regression
select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']);
select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']);
select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']);
-- test for security label on role
\c metadata_sync_2pc_db - - :master_port
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE grant_role2pc_user4 IS 'citus_unclassified';
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "grant_role2pc'_user1" IS 'citus_classified';
\c regression
SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type;
set citus.enable_create_database_propagation to on;
select 1 from citus_add_node('localhost', :worker_2_port);
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
FROM pg_auth_members
WHERE member::regrole::text in
('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5')
order by member::regrole::text
) t
$$);
select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']);
select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']);
select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']);
SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type;
\c metadata_sync_2pc_db
revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ;
revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3";
revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3";
revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3";
revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2";
revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1";
\c regression
drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5;
--test for user operations
--test for create user
\c regression - - :master_port
select 1 from citus_remove_node('localhost', :worker_2_port);
\c metadata_sync_2pc_db - - :master_port
CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1';
\c metadata_sync_2pc_db - - :worker_1_port
CREATE USER "test_role2-needs\!escape"
WITH
SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION
LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1;
create role test_role3;
\c regression - - :master_port
select 1 from citus_add_node('localhost', :worker_2_port);
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
rolcanlogin, rolreplication, rolbypassrls, rolconnlimit,
(rolpassword != '') as pass_not_empty, DATE(rolvaliduntil)
FROM pg_authid
WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3')
ORDER BY rolname
) t
$$);
--test for alter user
select 1 from citus_remove_node('localhost', :worker_2_port);
\c metadata_sync_2pc_db - - :master_port
-- Test ALTER ROLE with various options
ALTER ROLE test_role1 WITH PASSWORD 'new_password1';
\c metadata_sync_2pc_db - - :worker_1_port
ALTER USER "test_role2-needs\!escape"
WITH
NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION
LIMIT 5 VALID UNTIL '2024-01-01';
\c regression - - :master_port
select 1 from citus_add_node('localhost', :worker_2_port);
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
rolcanlogin, rolreplication, rolbypassrls, rolconnlimit,
(rolpassword != '') as pass_not_empty, DATE(rolvaliduntil)
FROM pg_authid
WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3')
ORDER BY rolname
) t
$$);
--test for drop user
select 1 from citus_remove_node('localhost', :worker_2_port);
\c metadata_sync_2pc_db - - :worker_1_port
DROP ROLE test_role1, "test_role2-needs\!escape";
\c metadata_sync_2pc_db - - :master_port
DROP ROLE test_role3;
\c regression - - :master_port
select 1 from citus_add_node('localhost', :worker_2_port);
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
rolcanlogin, rolreplication, rolbypassrls, rolconnlimit,
(rolpassword != '') as pass_not_empty, DATE(rolvaliduntil)
FROM pg_authid
WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3')
ORDER BY rolname
) t
$$);
-- Clean up: drop the database on worker node 2
\c regression - - :worker_2_port
DROP ROLE if exists test_role1, "test_role2-needs\!escape", test_role3;
\c regression - - :master_port
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
rolcanlogin, rolreplication, rolbypassrls, rolconnlimit,
(rolpassword != '') as pass_not_empty, DATE(rolvaliduntil)
FROM pg_authid
WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3')
ORDER BY rolname
) t
$$);
set citus.enable_create_database_propagation to on;
drop database metadata_sync_2pc_db;
drop schema metadata_sync_2pc_schema;
reset citus.enable_create_database_propagation;
reset search_path;
@@ -1,182 +0,0 @@
CREATE SCHEMA other_databases;
SET search_path TO other_databases;
SET citus.next_shard_id TO 10231023;
CREATE DATABASE other_db1;
\c other_db1
SHOW citus.main_db;
-- check that empty citus.superuser gives error
SET citus.superuser TO '';
CREATE USER empty_superuser;
SET citus.superuser TO 'postgres';
CREATE USER other_db_user1;
CREATE USER other_db_user2;
BEGIN;
CREATE USER other_db_user3;
CREATE USER other_db_user4;
COMMIT;
BEGIN;
CREATE USER other_db_user5;
CREATE USER other_db_user6;
ROLLBACK;
BEGIN;
CREATE USER other_db_user7;
SELECT 1/0;
COMMIT;
CREATE USER other_db_user8;
\c regression
SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1;
\c - - - :worker_1_port
SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1;
\c - - - :master_port
-- some user creation commands will fail but let's make sure we try to drop them just in case
DROP USER IF EXISTS other_db_user1, other_db_user2, other_db_user3, other_db_user4, other_db_user5, other_db_user6, other_db_user7, other_db_user8;
-- Make sure non-superuser roles cannot use internal GUCs
-- but they can still create a role
CREATE USER nonsuperuser CREATEROLE;
GRANT ALL ON SCHEMA citus_internal TO nonsuperuser;
SET ROLE nonsuperuser;
SELECT citus_internal.execute_command_on_remote_nodes_as_user($$SELECT 'dangerous query'$$, 'postgres');
\c other_db1
SET citus.local_hostname TO '127.0.0.1';
SET ROLE nonsuperuser;
-- Make sure that we don't try to access pg_dist_node.
-- Otherwise, we would get the following error:
-- ERROR: cache lookup failed for pg_dist_node, called too early?
CREATE USER other_db_user9;
RESET ROLE;
RESET citus.local_hostname;
RESET ROLE;
\c regression
SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1;
\c - - - :worker_1_port
SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1;
\c - - - :master_port
REVOKE ALL ON SCHEMA citus_internal FROM nonsuperuser;
DROP USER other_db_user9, nonsuperuser;
-- test from a worker
\c - - - :worker_1_port
CREATE DATABASE worker_other_db;
\c worker_other_db
CREATE USER worker_user1;
BEGIN;
CREATE USER worker_user2;
COMMIT;
BEGIN;
CREATE USER worker_user3;
ROLLBACK;
\c regression
SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1;
\c - - - :master_port
SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1;
-- some user creation commands will fail but let's make sure we try to drop them just in case
DROP USER IF EXISTS worker_user1, worker_user2, worker_user3;
-- test creating and dropping a database from a Citus non-main database
SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$);
SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
SELECT pg_sleep(0.1);
\c other_db1
CREATE DATABASE other_db3;
\c regression
SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type;
\c other_db1
DROP DATABASE other_db3;
\c regression
SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type;
\c worker_other_db - - :worker_1_port
CREATE DATABASE other_db4;
\c regression
SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type;
\c worker_other_db
DROP DATABASE other_db4;
\c regression
SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type;
DROP DATABASE worker_other_db;
CREATE DATABASE other_db5;
-- disable create database propagation for the next test
SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$);
SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
SELECT pg_sleep(0.1);
\c other_db5 - - :worker_2_port
-- locally create a database
CREATE DATABASE local_db;
\c regression - - -
-- re-enable create database propagation
SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$);
SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
SELECT pg_sleep(0.1);
\c other_db5 - - :master_port
-- Test a scenario where create database fails because the database
-- already exists on another node and we don't crash etc.
CREATE DATABASE local_db;
\c regression - - -
SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result;
\c - - - :worker_2_port
-- locally drop the database for cleanup purposes
SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$);
SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
SELECT pg_sleep(0.1);
DROP DATABASE local_db;
SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$);
SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
SELECT pg_sleep(0.1);
\c - - - :master_port
DROP DATABASE other_db5;
SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$);
SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
SELECT pg_sleep(0.1);
DROP SCHEMA other_databases;
DROP DATABASE other_db1;
@@ -58,8 +58,8 @@ CREATE TABLE tenk1 (
 SELECT create_distributed_table('tenk1', 'unique1');
 SET citus.log_remote_commands TO on;
-EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
-EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
+EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = $1;
+EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = $1;
 SET citus.log_remote_commands TO off;
 -- Proper error when creating statistics without a name on a Citus table
@@ -1,5 +1,41 @@
 -- removing coordinator from pg_dist_node should update pg_dist_colocation
 SELECT master_remove_node('localhost', :master_port);
+-- to silence -potentially flaky- "could not establish connection after" warnings in the test below
+SET client_min_messages TO ERROR;
+-- to fail fast when the hostname is not resolvable, as will be the case below
+SET citus.node_connection_timeout to '1s';
+BEGIN;
+SET application_name TO 'new_app_name';
+-- that should fail because of bad hostname & port
+SELECT citus_add_node('200.200.200.200', 1, 200);
+-- Since the above command failed, Postgres will need to revert the
+-- application_name change made in this transaction and this will
+-- happen within the abort-transaction callback, so we won't be in a
+-- transaction block while Postgres does that.
+--
+-- And when the application_name changes, Citus tries to re-assign
+-- the global pid but it does so only for Citus internal backends,
+-- and doing so for Citus internal backends doesn't require being
+-- in a transaction block and is safe.
+--
+-- However, for client external backends (like us here), Citus
+-- doesn't re-assign the global pid because it's not needed and it's
+-- not safe to do so outside of a transaction block. This is because
+-- it would require performing a catalog access to retrieve the local
+-- node id when the cached local node is invalidated, like what just
+-- happened here because of the failed citus_add_node() call made
+-- above.
+--
+-- So by failing here (rather than crashing), we ensure this behavior.
+ROLLBACK;
+RESET client_min_messages;
+RESET citus.node_connection_timeout;
 -- restore coordinator for the rest of the tests
 SELECT citus_set_coordinator_host('localhost', :master_port);
@@ -1,106 +0,0 @@
-- Create a new database
set citus.enable_create_database_propagation to on;
CREATE DATABASE role_operations_test_db;
SET citus.superuser TO 'postgres';
-- Connect to the new database
\c role_operations_test_db
-- Test CREATE ROLE with various options
CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1';
\c role_operations_test_db - - :worker_1_port
CREATE USER "test_role2-needs\!escape"
WITH
SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION
LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1;
\c regression - - :master_port
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
rolcanlogin, rolreplication, rolbypassrls, rolconnlimit,
(rolpassword != '') as pass_not_empty, DATE(rolvaliduntil)
FROM pg_authid
WHERE rolname in ('test_role1', 'test_role2-needs\!escape')
ORDER BY rolname
) t
$$);
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT r.rolname
FROM pg_dist_object d
JOIN pg_roles r ON d.objid = r.oid
WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape')
order by r.rolname
) t
$$);
\c role_operations_test_db - - :master_port
-- Test ALTER ROLE with various options
ALTER ROLE test_role1 WITH PASSWORD 'new_password1';
\c role_operations_test_db - - :worker_1_port
ALTER USER "test_role2-needs\!escape"
WITH
NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION
LIMIT 5 VALID UNTIL '2024-01-01';
\c regression - - :master_port
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
rolcanlogin, rolreplication, rolbypassrls, rolconnlimit,
(rolpassword != '') as pass_not_empty, DATE(rolvaliduntil)
FROM pg_authid
WHERE rolname in ('test_role1', 'test_role2-needs\!escape')
ORDER BY rolname
) t
$$);
\c role_operations_test_db - - :master_port
-- Test DROP ROLE
DROP ROLE no_such_role; -- fails nicely
DROP ROLE IF EXISTS no_such_role; -- doesn't fail
CREATE ROLE new_role;
DROP ROLE IF EXISTS no_such_role, new_role; -- doesn't fail
DROP ROLE IF EXISTS test_role1, "test_role2-needs\!escape";
\c regression - - :master_port
--verify that roles and dist_object are dropped
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb,
rolcanlogin, rolreplication, rolbypassrls, rolconnlimit,
(rolpassword != '') as pass_not_empty, DATE(rolvaliduntil)
FROM pg_authid
WHERE rolname in ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role')
ORDER BY rolname
) t
$$);
select result FROM run_command_on_all_nodes($$
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT r.rolname
FROM pg_roles r
WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role')
order by r.rolname
) t
$$);
SELECT result FROM run_command_on_all_nodes($$
SELECT count(*) leaked_pg_dist_object_records_for_roles
FROM pg_dist_object LEFT JOIN pg_authid ON (objid = oid)
WHERE classid = 1260 AND oid IS NULL
$$);
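-- (note, not part of the original test) classid 1260 is the fixed OID of
-- pg_authid, i.e. 'pg_authid'::regclass::oid, so this counts pg_dist_object
-- entries whose backing role no longer exists.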
-- Clean up: drop the database
set citus.enable_create_database_propagation to on;
DROP DATABASE role_operations_test_db;
reset citus.enable_create_database_propagation;
@@ -1,71 +0,0 @@
-- SECLABEL
--
-- Test suite for running SECURITY LABEL ON ROLE statements from non-main databases
SET citus.enable_create_database_propagation to ON;
CREATE DATABASE database1;
CREATE DATABASE database2;
\c - - - :worker_1_port
SET citus.enable_create_database_propagation to ON;
CREATE DATABASE database_w1;
\c - - - :master_port
CREATE ROLE user1;
\c database1
SHOW citus.main_db;
SHOW citus.superuser;
CREATE ROLE "user 2";
-- Set a SECURITY LABEL on a role from a non-main database
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_unclassified';
-- Check the result
\c regression
SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
\c database1
-- Set a SECURITY LABEL on database, it should not be propagated
SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database1 IS 'citus_classified';
-- Set a SECURITY LABEL on a table, it should not be propagated
CREATE TABLE a (i int);
SECURITY LABEL ON TABLE a IS 'citus_classified';
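-- (illustrative aside, not part of the original test) the labels set above can
-- be inspected locally through the pg_seclabels system view:
SELECT objtype, objname, provider, label FROM pg_seclabels ORDER BY objtype, objname;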
\c regression
SELECT node_type, result FROM get_citus_tests_label_provider_labels('database1') ORDER BY node_type;
-- Check that only the SECURITY LABEL for ROLES is propagated to the non-main databases on other nodes
\c database_w1 - - :worker_1_port
SELECT provider, objtype, label, objname FROM pg_seclabels ORDER BY objname;
-- Check the result after a transaction
BEGIN;
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified';
SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database_w1 IS 'citus_classified';
COMMIT;
\c regression
SELECT node_type, result FROM get_citus_tests_label_provider_labels('database_w1') ORDER BY node_type;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
BEGIN;
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified';
ROLLBACK;
SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
-- clean up
SET citus.enable_create_database_propagation to ON;
DROP DATABASE database1;
DROP DATABASE database2;
DROP DATABASE database_w1;
DROP ROLE user1;
DROP ROLE "user 2";
RESET citus.enable_create_database_propagation;
@@ -1,3 +1,5 @@
+BEGIN;
+SET LOCAL citus.show_shards_for_app_name_prefixes = '';
 -- The following query retrieves the foreign key constraints of the table "pg_dist_background_job"
 -- along with their details. This modification includes a fix for a null pointer exception that occurred
 -- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604.
@@ -25,3 +27,4 @@ where
 and ns.nspname='pg_catalog'
 order by
 fns.nspname, fc.relname, a.attnum;
+END;
@@ -13,3 +13,8 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardid IN (SELECT shardid FROM pg_
 SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 DROP TABLE table_with_orphaned_shards;
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
@@ -16,6 +16,16 @@ SELECT create_distributed_table('table_with_orphaned_shards', 'a');
 -- show all 32 placements are active
 SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='table_with_orphaned_shards'::regclass);
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+SELECT pg_sleep(0.1);
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement