diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 6579c52d9..4819209a3 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -72,18 +72,6 @@ ENV PATH="/home/citus/.pgenv/pgsql/bin:${PATH}" USER citus # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions -FROM base AS pg15 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.14 -RUN rm .pgenv/src/*.tar* -RUN make -C .pgenv/src/postgresql-*/ clean -RUN make -C .pgenv/src/postgresql-*/src/include install - -# create a staging directory with all files we want to copy from our pgenv build -# we will copy the contents of the staged folder into the final image at once -RUN mkdir .pgenv-staging/ -RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ -RUN rm .pgenv-staging/config/default.conf - FROM base AS pg16 RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.10 RUN rm .pgenv/src/*.tar* @@ -198,7 +186,6 @@ RUN git clone https://github.com/so-fancy/diff-so-fancy.git \ COPY --link --from=uncrustify-builder /uncrustify/usr/ /usr/ -COPY --link --from=pg15 /home/citus/.pgenv-staging/ /home/citus/.pgenv/ COPY --link --from=pg16 /home/citus/.pgenv-staging/ /home/citus/.pgenv/ COPY --link --from=pg17 /home/citus/.pgenv-staging/ /home/citus/.pgenv/ diff --git a/.gitattributes b/.gitattributes index c58445126..7b45513ac 100644 --- a/.gitattributes +++ b/.gitattributes @@ -25,7 +25,6 @@ configure -whitespace # except these exceptions... src/backend/distributed/utils/citus_outfuncs.c -citus-style -src/backend/distributed/deparser/ruleutils_15.c -citus-style src/backend/distributed/deparser/ruleutils_16.c -citus-style src/backend/distributed/deparser/ruleutils_17.c -citus-style src/backend/distributed/deparser/ruleutils_18.c -citus-style diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 98e012179..d0be0ce0d 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -32,11 +32,10 @@ jobs: style_checker_image_name: "ghcr.io/citusdata/stylechecker" style_checker_tools_version: "0.8.18" sql_snapshot_pg_version: "17.6" - image_suffix: "-va20872f" - pg15_version: '{ "major": "15", "full": "15.14" }' + image_suffix: "-dev-76e863d" pg16_version: '{ "major": "16", "full": "16.10" }' pg17_version: '{ "major": "17", "full": "17.6" }' - upgrade_pg_versions: "15.14-16.10-17.6" + upgrade_pg_versions: "16.10-17.6" steps: # Since GHA jobs need at least one step we use a noop step here. 
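Note on the mechanical C changes further down: with PG16 now the minimum supported server, every "#if PG_VERSION_NUM >= PG_VERSION_16" guard in the tree is unconditionally true, so the hunks below simply drop the guards and their #else branches. A minimal sketch of the collapse, using the varatt.h include from the columnar hunks as the example:

    /* Before this patch: gated, because PG15 had no varatt.h
     * (it was split out of postgres.h in PG16). */
    #if PG_VERSION_NUM >= PG_VERSION_16
    #include "varatt.h"
    #endif

    /* After: PG16 is the floor, so the include is unconditional. */
    #include "varatt.h"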
- name: Set up parameters @@ -110,7 +109,6 @@ jobs: image_suffix: - ${{ needs.params.outputs.image_suffix}} pg_version: - - ${{ needs.params.outputs.pg15_version }} - ${{ needs.params.outputs.pg16_version }} - ${{ needs.params.outputs.pg17_version }} runs-on: ubuntu-latest @@ -141,7 +139,6 @@ jobs: image_name: - ${{ needs.params.outputs.test_image_name }} pg_version: - - ${{ needs.params.outputs.pg15_version }} - ${{ needs.params.outputs.pg16_version }} - ${{ needs.params.outputs.pg17_version }} make: @@ -162,10 +159,6 @@ jobs: - check-enterprise-isolation-logicalrep-2 - check-enterprise-isolation-logicalrep-3 include: - - make: check-failure - pg_version: ${{ needs.params.outputs.pg15_version }} - suite: regress - image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-failure pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress @@ -174,10 +167,6 @@ jobs: pg_version: ${{ needs.params.outputs.pg17_version }} suite: regress image_name: ${{ needs.params.outputs.fail_test_image_name }} - - make: check-enterprise-failure - pg_version: ${{ needs.params.outputs.pg15_version }} - suite: regress - image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-enterprise-failure pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress @@ -186,10 +175,6 @@ jobs: pg_version: ${{ needs.params.outputs.pg17_version }} suite: regress image_name: ${{ needs.params.outputs.fail_test_image_name }} - - make: check-pytest - pg_version: ${{ needs.params.outputs.pg15_version }} - suite: regress - image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-pytest pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress @@ -198,10 +183,6 @@ jobs: pg_version: ${{ needs.params.outputs.pg17_version }} suite: regress image_name: ${{ needs.params.outputs.fail_test_image_name }} - - make: installcheck - suite: cdc - image_name: ${{ needs.params.outputs.test_image_name }} - pg_version: ${{ needs.params.outputs.pg15_version }} - make: installcheck suite: cdc image_name: ${{ needs.params.outputs.test_image_name }} @@ -210,10 +191,6 @@ jobs: suite: cdc image_name: ${{ needs.params.outputs.test_image_name }} pg_version: ${{ needs.params.outputs.pg17_version }} - - make: check-query-generator - pg_version: ${{ needs.params.outputs.pg15_version }} - suite: regress - image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-query-generator pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress @@ -268,7 +245,6 @@ jobs: image_name: - ${{ needs.params.outputs.fail_test_image_name }} pg_version: - - ${{ needs.params.outputs.pg15_version }} - ${{ needs.params.outputs.pg16_version }} - ${{ needs.params.outputs.pg17_version }} parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs @@ -315,12 +291,8 @@ jobs: fail-fast: false matrix: include: - - old_pg_major: 15 - new_pg_major: 16 - old_pg_major: 16 new_pg_major: 17 - - old_pg_major: 15 - new_pg_major: 17 env: old_pg_major: ${{ matrix.old_pg_major }} new_pg_major: ${{ matrix.new_pg_major }} @@ -376,7 +348,6 @@ jobs: fail-fast: false matrix: pg_version: - - ${{ needs.params.outputs.pg15_version }} - ${{ needs.params.outputs.pg16_version }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/flaky_test_debugging.yml b/.github/workflows/flaky_test_debugging.yml index 055ba6c6a..e7e56215d 100644 --- a/.github/workflows/flaky_test_debugging.yml +++ b/.github/workflows/flaky_test_debugging.yml @@ -25,7 +25,7 @@ jobs: name: Build Citus runs-on: 
ubuntu-latest container: - image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }} + image: ${{ vars.build_image_name }}:${{ vars.pg16_version }}${{ vars.image_suffix }} options: --user root steps: - uses: actions/checkout@v4 @@ -55,7 +55,7 @@ jobs: name: Test flakyness runs-on: ubuntu-latest container: - image: ${{ vars.fail_test_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }} + image: ${{ vars.fail_test_image_name }}:${{ vars.pg16_version }}${{ vars.image_suffix }} options: --user root needs: [build, prepare_parallelization_matrix] diff --git a/configure b/configure index cdaf0e78b..26fd0d62f 100755 --- a/configure +++ b/configure @@ -2588,7 +2588,7 @@ fi if test "$with_pg_version_check" = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5 $as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;} -elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then +elif test "$version_num" != '16' -a "$version_num" != '17'; then as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5 else { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5 diff --git a/configure.ac b/configure.ac index c7b5ba1de..26db953f4 100644 --- a/configure.ac +++ b/configure.ac @@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check) if test "$with_pg_version_check" = no; then AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)]) -elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then +elif test "$version_num" != '16' -a "$version_num" != '17'; then AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.]) else AC_MSG_NOTICE([building against PostgreSQL $version_num]) diff --git a/src/backend/columnar/columnar_compression.c b/src/backend/columnar/columnar_compression.c index 2ff35da98..3af6bb031 100644 --- a/src/backend/columnar/columnar_compression.c +++ b/src/backend/columnar/columnar_compression.c @@ -25,9 +25,7 @@ #include #endif -#if PG_VERSION_NUM >= PG_VERSION_16 #include "varatt.h" -#endif #if HAVE_LIBZSTD #include diff --git a/src/backend/columnar/columnar_customscan.c b/src/backend/columnar/columnar_customscan.c index d97f4cdeb..b8a051ace 100644 --- a/src/backend/columnar/columnar_customscan.c +++ b/src/backend/columnar/columnar_customscan.c @@ -39,11 +39,10 @@ #include "optimizer/paths.h" #include "optimizer/plancat.h" #include "optimizer/restrictinfo.h" -#if PG_VERSION_NUM >= PG_VERSION_16 #include "parser/parse_relation.h" #include "parser/parsetree.h" -#endif #include "utils/builtins.h" +#include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/relcache.h" #include "utils/ruleutils.h" @@ -140,9 +139,7 @@ static List * set_deparse_context_planstate(List *dpcontext, Node *node, /* other helpers */ static List * ColumnarVarNeeded(ColumnarScanState *columnarScanState); static Bitmapset * ColumnarAttrNeeded(ScanState *ss); -#if PG_VERSION_NUM >= PG_VERSION_16 static Bitmapset * fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns); -#endif /* saved hook value in case of unload */ static set_rel_pathlist_hook_type PreviousSetRelPathlistHook = NULL; @@ -551,7 +548,7 @@ ColumnarIndexScanAdditionalCost(PlannerInfo *root, RelOptInfo *rel, * "anti-correlated" (-1) since both help us avoiding from reading the * same stripe again 
and again. */ - double absIndexCorrelation = float_abs(indexCorrelation); + double absIndexCorrelation = fabs(indexCorrelation); /* * To estimate the number of stripes that we need to read, we do linear @@ -670,7 +667,7 @@ CheckVarStats(PlannerInfo *root, Var *var, Oid sortop, float4 *absVarCorrelation * If the Var is not highly correlated, then the chunk's min/max bounds * will be nearly useless. */ - if (float_abs(varCorrelation) < ColumnarQualPushdownCorrelationThreshold) + if (fabs(varCorrelation) < ColumnarQualPushdownCorrelationThreshold) { if (absVarCorrelation) { @@ -678,7 +675,7 @@ CheckVarStats(PlannerInfo *root, Var *var, Oid sortop, float4 *absVarCorrelation * Report absVarCorrelation if caller wants to know why given * var is rejected. */ - *absVarCorrelation = float_abs(varCorrelation); + *absVarCorrelation = fabs(varCorrelation); } return false; } @@ -1063,9 +1060,7 @@ FindCandidateRelids(PlannerInfo *root, RelOptInfo *rel, List *joinClauses) * For the relevant PG16 commit requiring this addition: * postgres/postgres@2489d76 */ -#if PG_VERSION_NUM >= PG_VERSION_16 candidateRelids = bms_del_members(candidateRelids, root->outer_join_rels); -#endif return candidateRelids; } @@ -1394,7 +1389,6 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte, } int numberOfColumnsRead = 0; -#if PG_VERSION_NUM >= PG_VERSION_16 if (rte->perminfoindex > 0) { /* @@ -1426,9 +1420,6 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte, perminfo-> selectedCols)); } -#else - numberOfColumnsRead = bms_num_members(rte->selectedCols); -#endif int numberOfClausesPushed = list_length(allClauses); @@ -1449,8 +1440,6 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte, } -#if PG_VERSION_NUM >= PG_VERSION_16 - /* * fixup_inherited_columns * @@ -1509,9 +1498,6 @@ fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns) } -#endif - - /* * CostColumnarScan calculates the cost of scanning the columnar table. 
The * cost is estimated by using all stripe metadata to estimate based on the diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c index cd62c8b0c..dc9f90c3d 100644 --- a/src/backend/columnar/columnar_metadata.c +++ b/src/backend/columnar/columnar_metadata.c @@ -62,13 +62,9 @@ #include "distributed/listutils.h" -#if PG_VERSION_NUM >= PG_VERSION_16 #include "parser/parse_relation.h" #include "storage/relfilelocator.h" #include "utils/relfilenumbermap.h" -#else -#include "utils/relfilenodemap.h" -#endif #define COLUMNAR_RELOPTION_NAMESPACE "columnar" #define SLOW_METADATA_ACCESS_WARNING \ @@ -730,7 +726,7 @@ ReadStripeSkipList(Relation rel, uint64 stripe, ScanKeyData scanKey[2]; uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel), - RelationPhysicalIdentifier_compat(rel)); + rel->rd_locator); Oid columnarChunkOid = ColumnarChunkRelationId(); Relation columnarChunk = table_open(columnarChunkOid, AccessShareLock); @@ -1277,7 +1273,7 @@ List * StripesForRelfilelocator(Relation rel) { uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel), - RelationPhysicalIdentifier_compat(rel)); + rel->rd_locator); /* * PG18 requires snapshot to be active or registered before it's used @@ -1309,7 +1305,7 @@ uint64 GetHighestUsedAddress(Relation rel) { uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel), - RelationPhysicalIdentifier_compat(rel)); + rel->rd_locator); uint64 highestUsedAddress = 0; uint64 highestUsedId = 0; @@ -1330,10 +1326,8 @@ GetHighestUsedAddress(Relation rel) Oid ColumnarRelationId(Oid relid, RelFileLocator relfilelocator) { - return OidIsValid(relid) ? relid : RelidByRelfilenumber(RelationTablespace_compat( - relfilelocator), - RelationPhysicalIdentifierNumber_compat( - relfilelocator)); + return OidIsValid(relid) ? 
relid : RelidByRelfilenumber(relfilelocator.spcOid, + relfilelocator.relNumber); } @@ -1624,7 +1618,7 @@ DeleteMetadataRows(Relation rel) } uint64 storageId = LookupStorageId(RelationPrecomputeOid(rel), - RelationPhysicalIdentifier_compat(rel)); + rel->rd_locator); DeleteStorageFromColumnarMetadataTable(ColumnarStripeRelationId(), Anum_columnar_stripe_storageid, @@ -1789,10 +1783,8 @@ create_estate_for_relation(Relation rel) rte->rellockmode = AccessShareLock; /* Prepare permission info on PG 16+ */ -#if PG_VERSION_NUM >= PG_VERSION_16 List *perminfos = NIL; addRTEPermissionInfo(&perminfos, rte); -#endif /* Initialize the range table, with the right signature for each PG version */ #if PG_VERSION_NUM >= PG_VERSION_18 @@ -1804,7 +1796,7 @@ create_estate_for_relation(Relation rel) perminfos, NULL /* unpruned_relids: not used by columnar */ ); -#elif PG_VERSION_NUM >= PG_VERSION_16 +#else /* PG 16–17: three-arg signature (permInfos) */ ExecInitRangeTable( @@ -1812,13 +1804,6 @@ create_estate_for_relation(Relation rel) list_make1(rte), perminfos ); -#else - - /* PG 15: two-arg signature */ - ExecInitRangeTable( - estate, - list_make1(rte) - ); #endif estate->es_output_cid = GetCurrentCommandId(true); diff --git a/src/backend/columnar/columnar_reader.c b/src/backend/columnar/columnar_reader.c index 17c4061f1..573bed39e 100644 --- a/src/backend/columnar/columnar_reader.c +++ b/src/backend/columnar/columnar_reader.c @@ -255,8 +255,7 @@ ColumnarReadFlushPendingWrites(ColumnarReadState *readState) { Assert(!readState->snapshotRegisteredByUs); - RelFileNumber relfilenumber = RelationPhysicalIdentifierNumber_compat( - RelationPhysicalIdentifier_compat(readState->relation)); + RelFileNumber relfilenumber = readState->relation->rd_locator.relNumber; FlushWriteStateForRelfilenumber(relfilenumber, GetCurrentSubTransactionId()); if (readState->snapshot == InvalidSnapshot || !IsMVCCSnapshot(readState->snapshot)) diff --git a/src/backend/columnar/columnar_storage.c b/src/backend/columnar/columnar_storage.c index 0ae6ccca3..caa6f5a68 100644 --- a/src/backend/columnar/columnar_storage.c +++ b/src/backend/columnar/columnar_storage.c @@ -169,11 +169,7 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId) } /* create two pages */ -#if PG_VERSION_NUM >= PG_VERSION_16 PGIOAlignedBlock block; -#else - PGAlignedBlock block; -#endif Page page = block.data; /* write metapage */ @@ -192,7 +188,7 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId) (char *) &metapage, sizeof(ColumnarMetapage)); phdr->pd_lower += sizeof(ColumnarMetapage); - log_newpage(RelationPhysicalIdentifierBackend_compat(&srel), MAIN_FORKNUM, + log_newpage(&srel->smgr_rlocator.locator, MAIN_FORKNUM, COLUMNAR_METAPAGE_BLOCKNO, page, true); PageSetChecksumInplace(page, COLUMNAR_METAPAGE_BLOCKNO); smgrextend(srel, MAIN_FORKNUM, COLUMNAR_METAPAGE_BLOCKNO, page, true); @@ -200,7 +196,7 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId) /* write empty page */ PageInit(page, BLCKSZ, 0); - log_newpage(RelationPhysicalIdentifierBackend_compat(&srel), MAIN_FORKNUM, + log_newpage(&srel->smgr_rlocator.locator, MAIN_FORKNUM, COLUMNAR_EMPTY_BLOCKNO, page, true); PageSetChecksumInplace(page, COLUMNAR_EMPTY_BLOCKNO); smgrextend(srel, MAIN_FORKNUM, COLUMNAR_EMPTY_BLOCKNO, page, true); diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 8271d28b2..7162d18aa 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -208,8 +208,7 @@ 
columnar_beginscan_extended(Relation relation, Snapshot snapshot, uint32 flags, Bitmapset *attr_needed, List *scanQual) { CheckCitusColumnarVersion(ERROR); - RelFileNumber relfilenumber = RelationPhysicalIdentifierNumber_compat( - RelationPhysicalIdentifier_compat(relation)); + RelFileNumber relfilenumber = relation->rd_locator.relNumber; /* * A memory context to use for scan-wide data, including the lazily @@ -435,8 +434,7 @@ columnar_index_fetch_begin(Relation rel) { CheckCitusColumnarVersion(ERROR); - RelFileNumber relfilenumber = RelationPhysicalIdentifierNumber_compat( - RelationPhysicalIdentifier_compat(rel)); + RelFileNumber relfilenumber = rel->rd_locator.relNumber; if (PendingWritesInUpperTransactions(relfilenumber, GetCurrentSubTransactionId())) { /* XXX: maybe we can just flush the data and continue */ @@ -865,11 +863,9 @@ columnar_relation_set_new_filelocator(Relation rel, * state. If they are equal, this is a new relation object and we don't * need to clean anything. */ - if (RelationPhysicalIdentifierNumber_compat(RelationPhysicalIdentifier_compat(rel)) != - RelationPhysicalIdentifierNumberPtr_compat(newrlocator)) + if (rel->rd_locator.relNumber != newrlocator->relNumber) { - MarkRelfilenumberDropped(RelationPhysicalIdentifierNumber_compat( - RelationPhysicalIdentifier_compat(rel)), + MarkRelfilenumberDropped(rel->rd_locator.relNumber, GetCurrentSubTransactionId()); DeleteMetadataRows(rel); @@ -892,9 +888,9 @@ static void columnar_relation_nontransactional_truncate(Relation rel) { CheckCitusColumnarVersion(ERROR); - RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel); + RelFileLocator relfilelocator = rel->rd_locator; - NonTransactionDropWriteState(RelationPhysicalIdentifierNumber_compat(relfilelocator)); + NonTransactionDropWriteState(relfilelocator.relNumber); /* Delete old relfilenode metadata */ DeleteMetadataRows(rel); @@ -1098,7 +1094,6 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params, List *indexList = RelationGetIndexList(rel); int nindexes = list_length(indexList); -#if PG_VERSION_NUM >= PG_VERSION_16 struct VacuumCutoffs cutoffs; vacuum_get_cutoffs(rel, params, &cutoffs); @@ -1140,68 +1135,6 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params, false); #endif -#else - TransactionId oldestXmin; - TransactionId freezeLimit; - MultiXactId multiXactCutoff; - - /* initialize xids */ -#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16) - MultiXactId oldestMxact; - vacuum_set_xid_limits(rel, - params->freeze_min_age, - params->freeze_table_age, - params->multixact_freeze_min_age, - params->multixact_freeze_table_age, - &oldestXmin, &oldestMxact, - &freezeLimit, &multiXactCutoff); - - Assert(MultiXactIdPrecedesOrEquals(multiXactCutoff, oldestMxact)); -#else - TransactionId xidFullScanLimit; - MultiXactId mxactFullScanLimit; - vacuum_set_xid_limits(rel, - params->freeze_min_age, - params->freeze_table_age, - params->multixact_freeze_min_age, - params->multixact_freeze_table_age, - &oldestXmin, &freezeLimit, &xidFullScanLimit, - &multiXactCutoff, &mxactFullScanLimit); -#endif - - Assert(TransactionIdPrecedesOrEquals(freezeLimit, oldestXmin)); - - /* - * Columnar storage doesn't hold any transaction IDs, so we can always - * just advance to the most aggressive value. 
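The *_compat accessors deleted throughout these columnar hunks papered over PG16's rename of RelFileNode to RelFileLocator; with PG15 gone, the fields can be read directly. A short sketch of the direct access pattern the patch switches to (field names per PG16's storage/relfilelocator.h):

    #include "postgres.h"

    #include "storage/relfilelocator.h"
    #include "utils/rel.h"

    /* PG16+: a relation's physical identity lives in rd_locator. */
    static inline RelFileNumber
    relation_filenumber(Relation rel)
    {
        /* spcOid = tablespace, dbOid = database, relNumber = on-disk file */
        return rel->rd_locator.relNumber;
    }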
- */ - TransactionId newRelFrozenXid = oldestXmin; -#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16) - MultiXactId newRelminMxid = oldestMxact; -#else - MultiXactId newRelminMxid = multiXactCutoff; -#endif - - double new_live_tuples = ColumnarTableTupleCount(rel); - - /* all visible pages are always 0 */ - BlockNumber new_rel_allvisible = 0; - -#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16) - bool frozenxid_updated; - bool minmulti_updated; - - vac_update_relstats(rel, new_rel_pages, new_live_tuples, - new_rel_allvisible, nindexes > 0, - newRelFrozenXid, newRelminMxid, - &frozenxid_updated, &minmulti_updated, false); -#else - vac_update_relstats(rel, new_rel_pages, new_live_tuples, - new_rel_allvisible, nindexes > 0, - newRelFrozenXid, newRelminMxid, false); -#endif -#endif - #if PG_VERSION_NUM >= PG_VERSION_18 pgstat_report_vacuum(RelationGetRelid(rel), rel->rd_rel->relisshared, @@ -1906,7 +1839,7 @@ TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetIt Datum *abbrev = NULL; Datum tsDatum; bool tsDatumIsNull; - if (!tuplesort_getdatum_compat(tupleSort, forwardDirection, false, + if (!tuplesort_getdatum(tupleSort, forwardDirection, false, &tsDatum, &tsDatumIsNull, abbrev)) { ItemPointerSetInvalid(&tsItemPointerData); @@ -2148,12 +2081,12 @@ ColumnarTableDropHook(Oid relid) * tableam tables storage is managed by postgres. */ Relation rel = table_open(relid, AccessExclusiveLock); - RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel); + RelFileLocator relfilelocator = rel->rd_locator; DeleteMetadataRows(rel); DeleteColumnarTableOptions(rel->rd_id, true); - MarkRelfilenumberDropped(RelationPhysicalIdentifierNumber_compat(relfilelocator), + MarkRelfilenumberDropped(relfilelocator.relNumber, GetCurrentSubTransactionId()); /* keep the lock since we did physical changes to the relation */ @@ -2571,11 +2504,7 @@ static const TableAmRoutine columnar_am_methods = { .tuple_lock = columnar_tuple_lock, .finish_bulk_insert = columnar_finish_bulk_insert, -#if PG_VERSION_NUM >= PG_VERSION_16 .relation_set_new_filelocator = columnar_relation_set_new_filelocator, -#else - .relation_set_new_filenode = columnar_relation_set_new_filelocator, -#endif .relation_nontransactional_truncate = columnar_relation_nontransactional_truncate, .relation_copy_data = columnar_relation_copy_data, .relation_copy_for_cluster = columnar_relation_copy_for_cluster, diff --git a/src/backend/columnar/columnar_writer.c b/src/backend/columnar/columnar_writer.c index e698d1a41..f86713cb8 100644 --- a/src/backend/columnar/columnar_writer.c +++ b/src/backend/columnar/columnar_writer.c @@ -35,12 +35,8 @@ #include "columnar/columnar_storage.h" #include "columnar/columnar_version_compat.h" -#if PG_VERSION_NUM >= PG_VERSION_16 #include "storage/relfilelocator.h" #include "utils/relfilenumbermap.h" -#else -#include "utils/relfilenodemap.h" -#endif struct ColumnarWriteState { @@ -103,7 +99,7 @@ ColumnarBeginWrite(Relation rel, ColumnarOptions options, TupleDesc tupleDescriptor) { - RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel); + RelFileLocator relfilelocator = rel->rd_locator; /* get comparison function pointers for each of the columns */ uint32 columnCount = tupleDescriptor->natts; diff --git a/src/backend/columnar/write_state_management.c b/src/backend/columnar/write_state_management.c index a4e0240d6..3d9ae9006 100644 --- a/src/backend/columnar/write_state_management.c +++ b/src/backend/columnar/write_state_management.c 
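The large #else block deleted above reimplemented freeze-horizon bookkeeping separately for PG14 and PG15; PG16 folded all of it into a single call, which is what the retained branch uses. A hedged sketch of that unified API (signature per PG16's commands/vacuum.h):

    #include "postgres.h"

    #include "access/transam.h"
    #include "commands/vacuum.h"

    static void
    compute_vacuum_cutoffs(Relation rel, VacuumParams *params)
    {
        struct VacuumCutoffs cutoffs;

        /* One call now fills OldestXmin, OldestMxact, FreezeLimit and
         * MultiXactCutoff, replacing the vacuum_set_xid_limits() variants. */
        vacuum_get_cutoffs(rel, params, &cutoffs);

        Assert(TransactionIdPrecedesOrEquals(cutoffs.FreezeLimit,
                                             cutoffs.OldestXmin));
    }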
@@ -146,9 +146,7 @@ columnar_init_write_state(Relation relation, TupleDesc tupdesc, } WriteStateMapEntry *hashEntry = hash_search(WriteStateMap, - &RelationPhysicalIdentifierNumber_compat( - RelationPhysicalIdentifier_compat( - relation)), + &(relation->rd_locator.relNumber), HASH_ENTER, &found); if (!found) { diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c index dfc57f096..64a83fa06 100644 --- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c +++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c @@ -1476,20 +1476,10 @@ InsertMetadataForCitusLocalTable(Oid citusLocalTableId, uint64 shardId, static void FinalizeCitusLocalTableCreation(Oid relationId) { -#if PG_VERSION_NUM >= PG_VERSION_16 - /* * PG16+ supports truncate triggers on foreign tables */ if (RegularTable(relationId) || IsForeignTable(relationId)) -#else - - /* - * If it is a foreign table, then skip creating citus truncate trigger - * as foreign tables do not support truncate triggers. - */ - if (RegularTable(relationId)) -#endif { CreateTruncateTrigger(relationId); } diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index 268694034..61e597c51 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -161,7 +161,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati pfree(collctype); } -#if PG_VERSION_NUM >= PG_VERSION_16 char *collicurules = NULL; datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collicurules, &isnull); if (!isnull) @@ -170,7 +169,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati appendStringInfo(&collationNameDef, ", rules = %s", quote_literal_cstr(collicurules)); } -#endif if (!collisdeterministic) { appendStringInfoString(&collationNameDef, ", deterministic = false"); diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index e7eaab9af..34d57d592 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -1271,17 +1271,10 @@ CreateCitusTable(Oid relationId, CitusTableType tableType, colocationId, citusTableParams.replicationModel, autoConverted); -#if PG_VERSION_NUM >= PG_VERSION_16 - /* * PG16+ supports truncate triggers on foreign tables */ if (RegularTable(relationId) || IsForeignTable(relationId)) -#else - - /* foreign tables do not support TRUNCATE trigger */ - if (RegularTable(relationId)) -#endif { CreateTruncateTrigger(relationId); } diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 3586fa2cd..91ea2c437 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -81,10 +81,7 @@ typedef struct DatabaseCollationInfo char *datctype; char *daticulocale; char *datcollversion; - -#if PG_VERSION_NUM >= PG_VERSION_16 char *daticurules; -#endif } DatabaseCollationInfo; static char * GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database @@ -853,14 +850,12 @@ GetDatabaseCollation(Oid dbOid) info.datcollversion = TextDatumGetCString(collverDatum); } -#if PG_VERSION_NUM >= PG_VERSION_16 Datum icurulesDatum = heap_getattr(tup, Anum_pg_database_daticurules, tupdesc, &isNull); if (!isNull) { info.daticurules = 
TextDatumGetCString(icurulesDatum); } -#endif table_close(rel, AccessShareLock); heap_freetuple(tup); @@ -954,13 +949,11 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) quote_identifier(GetLocaleProviderString( databaseForm->datlocprovider))); -#if PG_VERSION_NUM >= PG_VERSION_16 if (collInfo.daticurules != NULL) { appendStringInfo(&str, " ICU_RULES = %s", quote_identifier( collInfo.daticurules)); } -#endif return str.data; } diff --git a/src/backend/distributed/commands/domain.c b/src/backend/distributed/commands/domain.c index d62428ce4..0f09d8655 100644 --- a/src/backend/distributed/commands/domain.c +++ b/src/backend/distributed/commands/domain.c @@ -64,8 +64,7 @@ CreateDomainStmt * RecreateDomainStmt(Oid domainOid) { CreateDomainStmt *stmt = makeNode(CreateDomainStmt); - stmt->domainname = stringToQualifiedNameList_compat(format_type_be_qualified( - domainOid)); + stmt->domainname = stringToQualifiedNameList(format_type_be_qualified(domainOid), NULL); HeapTuple tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(domainOid)); if (!HeapTupleIsValid(tup)) diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index d95c53fb5..b5cf4f7c8 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -52,10 +52,7 @@ #include "distributed/resource_lock.h" #include "distributed/version_compat.h" #include "distributed/worker_manager.h" - -#if PG_VERSION_NUM >= PG_VERSION_16 #include "catalog/pg_namespace.h" -#endif /* Local functions forward declarations for helper functions */ diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index c7abe80de..8bf2947a9 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -110,10 +110,7 @@ #include "distributed/transmit.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" - -#if PG_VERSION_NUM >= PG_VERSION_16 #include "distributed/relation_utils.h" -#endif /* constant used in binary protocol */ @@ -3251,12 +3248,8 @@ CheckCopyPermissions(CopyStmt *copyStatement) RangeTblEntry *rte = (RangeTblEntry*) linitial(range_table); tupDesc = RelationGetDescr(rel); -#if PG_VERSION_NUM >= PG_VERSION_16 /* create permission info for rte */ RTEPermissionInfo *perminfo = GetFilledPermissionInfo(rel->rd_id, rte->inh, required_access); -#else - rte->requiredPerms = required_access; -#endif attnums = CopyGetAttnums(tupDesc, rel, copyStatement->attlist); foreach(cur, attnums) @@ -3265,29 +3258,17 @@ CheckCopyPermissions(CopyStmt *copyStatement) if (is_from) { -#if PG_VERSION_NUM >= PG_VERSION_16 perminfo->insertedCols = bms_add_member(perminfo->insertedCols, attno); -#else - rte->insertedCols = bms_add_member(rte->insertedCols, attno); -#endif } else { -#if PG_VERSION_NUM >= PG_VERSION_16 perminfo->selectedCols = bms_add_member(perminfo->selectedCols, attno); -#else - rte->selectedCols = bms_add_member(rte->selectedCols, attno); -#endif } } -#if PG_VERSION_NUM >= PG_VERSION_16 /* link rte to its permission info then check permissions */ rte->perminfoindex = 1; ExecCheckPermissions(list_make1(rte), list_make1(perminfo), true); -#else - ExecCheckRTPerms(range_table, true); -#endif /* TODO: Perform RLS checks once supported */ diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 24020e171..173bcd48e 100644 --- a/src/backend/distributed/commands/role.c +++ 
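The CheckCopyPermissions() hunk above is the clearest instance of the PG16 permission-model migration running through this patch: per-RTE permission bits (requiredPerms, selectedCols, insertedCols) moved out of RangeTblEntry into separate RTEPermissionInfo nodes linked via rte->perminfoindex. GetFilledPermissionInfo appears to be a Citus helper from distributed/relation_utils.h (included earlier in the same hunk); a minimal sketch of the stock PG16 pattern uses addRTEPermissionInfo, which assigns perminfoindex itself:

    #include "postgres.h"

    #include "executor/executor.h"
    #include "nodes/parsenodes.h"
    #include "parser/parse_relation.h"

    static void
    check_select_permission(RangeTblEntry *rte)
    {
        List *perminfos = NIL;

        /* Allocates an RTEPermissionInfo for rte, sets rte->perminfoindex. */
        RTEPermissionInfo *perminfo = addRTEPermissionInfo(&perminfos, rte);
        perminfo->requiredPerms = ACL_SELECT;

        /* true: ereport on violation rather than returning false. */
        ExecCheckPermissions(list_make1(rte), perminfos, true);
    }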
b/src/backend/distributed/commands/role.c @@ -734,7 +734,7 @@ MakeSetStatementArguments(char *configurationName, char *configurationValue) * using this function */ int gucCount = 0; - struct config_generic **gucVariables = get_guc_variables_compat(&gucCount); + struct config_generic **gucVariables = get_guc_variables(&gucCount); struct config_generic **matchingConfig = (struct config_generic **) SafeBsearch((void *) &key, @@ -851,12 +851,8 @@ GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options) if (strcmp(option->defname, "adminmembers") == 0) { -#if PG_VERSION_NUM >= PG_VERSION_16 DefElem *opt = makeDefElem("admin", (Node *) makeBoolean(true), -1); grantRoleStmt->opt = list_make1(opt); -#else - grantRoleStmt->admin_opt = true; -#endif } stmts = lappend(stmts, grantRoleStmt); @@ -916,8 +912,6 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) grantorRole->rolename = GetUserNameFromId(membership->grantor, false); grantRoleStmt->grantor = grantorRole; -#if PG_VERSION_NUM >= PG_VERSION_16 - /* inherit option is always included */ DefElem *inherit_opt; if (membership->inherit_option) @@ -943,9 +937,6 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) DefElem *set_opt = makeDefElem("set", (Node *) makeBoolean(false), -1); grantRoleStmt->opt = lappend(grantRoleStmt->opt, set_opt); } -#else - grantRoleStmt->admin_opt = membership->admin_option; -#endif stmts = lappend(stmts, grantRoleStmt); } diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c index 46cf5e602..f71f779a5 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -184,7 +184,7 @@ truncate_local_data_after_distributing_table(PG_FUNCTION_ARGS) TruncateStmt *truncateStmt = makeNode(TruncateStmt); char *relationName = generate_qualified_relation_name(relationId); - List *names = stringToQualifiedNameList_compat(relationName); + List *names = stringToQualifiedNameList(relationName, NULL); truncateStmt->relations = list_make1(makeRangeVarFromNameList(names)); truncateStmt->restart_seqs = false; truncateStmt->behavior = DROP_CASCADE; diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index b1e573638..f1fd47305 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -189,7 +189,7 @@ RecreateCompositeTypeStmt(Oid typeOid) Assert(get_typtype(typeOid) == TYPTYPE_COMPOSITE); CompositeTypeStmt *stmt = makeNode(CompositeTypeStmt); - List *names = stringToQualifiedNameList_compat(format_type_be_qualified(typeOid)); + List *names = stringToQualifiedNameList(format_type_be_qualified(typeOid), NULL); stmt->typevar = makeRangeVarFromNameList(names); stmt->coldeflist = CompositeTypeColumnDefList(typeOid); @@ -254,7 +254,7 @@ RecreateEnumStmt(Oid typeOid) Assert(get_typtype(typeOid) == TYPTYPE_ENUM); CreateEnumStmt *stmt = makeNode(CreateEnumStmt); - stmt->typeName = stringToQualifiedNameList_compat(format_type_be_qualified(typeOid)); + stmt->typeName = stringToQualifiedNameList(format_type_be_qualified(typeOid), NULL); stmt->vals = EnumValsList(typeOid); return stmt; @@ -567,8 +567,7 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress) char * GenerateBackupNameForTypeCollision(const ObjectAddress *address) { - List *names = stringToQualifiedNameList_compat(format_type_be_qualified( - address->objectId)); + List *names = stringToQualifiedNameList(format_type_be_qualified(address->objectId), NULL); RangeVar *rel = makeRangeVarFromNameList(names); 
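stringToQualifiedNameList_compat, removed at several call sites here, existed only because PG16 added a second parameter to stringToQualifiedNameList(): an error-save context node for soft error reporting. Passing NULL preserves the pre-16 throw-on-error behavior, which is what every converted call site does. A small sketch:

    #include "postgres.h"

    #include "catalog/namespace.h"
    #include "utils/regproc.h"

    static RangeVar *
    qualified_name_to_rangevar(const char *qualifiedName)
    {
        /* NULL escontext: parse errors are raised with ereport(), as
         * on PG15 and earlier. */
        List *names = stringToQualifiedNameList(qualifiedName, NULL);

        return makeRangeVarFromNameList(names);
    }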
char *newName = palloc0(NAMEDATALEN); diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index 08064b4b0..c8256b8da 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -43,9 +43,7 @@ typedef struct CitusVacuumParams VacOptValue truncate; VacOptValue index_cleanup; int nworkers; -#if PG_VERSION_NUM >= PG_VERSION_16 int ring_size; -#endif } CitusVacuumParams; /* @@ -353,19 +351,12 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams) } /* if no flags remain, exit early */ -#if PG_VERSION_NUM >= PG_VERSION_16 if (vacuumFlags & VACOPT_PROCESS_TOAST && vacuumFlags & VACOPT_PROCESS_MAIN) { /* process toast and process main are true by default */ if (((vacuumFlags & ~VACOPT_PROCESS_TOAST) & ~VACOPT_PROCESS_MAIN) == 0 && vacuumParams.ring_size == -1 && -#else - if (vacuumFlags & VACOPT_PROCESS_TOAST) - { - /* process toast is true by default */ - if ((vacuumFlags & ~VACOPT_PROCESS_TOAST) == 0 && -#endif vacuumParams.truncate == VACOPTVALUE_UNSPECIFIED && vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED && vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET @@ -413,7 +404,6 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams) appendStringInfoString(vacuumPrefix, "PROCESS_TOAST FALSE,"); } -#if PG_VERSION_NUM >= PG_VERSION_16 if (!(vacuumFlags & VACOPT_PROCESS_MAIN)) { appendStringInfoString(vacuumPrefix, "PROCESS_MAIN FALSE,"); @@ -433,7 +423,6 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams) { appendStringInfo(vacuumPrefix, "BUFFER_USAGE_LIMIT %d,", vacuumParams.ring_size); } -#endif if (vacuumParams.truncate != VACOPTVALUE_UNSPECIFIED) { @@ -537,13 +526,10 @@ VacuumStmtParams(VacuumStmt *vacstmt) bool full = false; bool disable_page_skipping = false; bool process_toast = true; - -#if PG_VERSION_NUM >= PG_VERSION_16 bool process_main = true; bool skip_database_stats = false; bool only_database_stats = false; params.ring_size = -1; -#endif /* Set default value */ params.index_cleanup = VACOPTVALUE_UNSPECIFIED; @@ -563,13 +549,11 @@ VacuumStmtParams(VacuumStmt *vacstmt) { skip_locked = defGetBoolean(opt); } -#if PG_VERSION_NUM >= PG_VERSION_16 else if (strcmp(opt->defname, "buffer_usage_limit") == 0) { char *vac_buffer_size = defGetString(opt); parse_int(vac_buffer_size, ¶ms.ring_size, GUC_UNIT_KB, NULL); } -#endif else if (!vacstmt->is_vacuumcmd) { ereport(ERROR, @@ -594,7 +578,6 @@ VacuumStmtParams(VacuumStmt *vacstmt) { disable_page_skipping = defGetBoolean(opt); } -#if PG_VERSION_NUM >= PG_VERSION_16 else if (strcmp(opt->defname, "process_main") == 0) { process_main = defGetBoolean(opt); @@ -607,7 +590,6 @@ VacuumStmtParams(VacuumStmt *vacstmt) { only_database_stats = defGetBoolean(opt); } -#endif else if (strcmp(opt->defname, "process_toast") == 0) { process_toast = defGetBoolean(opt); @@ -678,11 +660,9 @@ VacuumStmtParams(VacuumStmt *vacstmt) (analyze ? VACOPT_ANALYZE : 0) | (freeze ? VACOPT_FREEZE : 0) | (full ? VACOPT_FULL : 0) | -#if PG_VERSION_NUM >= PG_VERSION_16 (process_main ? VACOPT_PROCESS_MAIN : 0) | (skip_database_stats ? VACOPT_SKIP_DATABASE_STATS : 0) | (only_database_stats ? VACOPT_ONLY_DATABASE_STATS : 0) | -#endif (process_toast ? VACOPT_PROCESS_TOAST : 0) | (disable_page_skipping ? 
VACOPT_DISABLE_PAGE_SKIPPING : 0); return params; diff --git a/src/backend/distributed/deparser/deparse_role_stmts.c b/src/backend/distributed/deparser/deparse_role_stmts.c index 61c0be246..1f1032672 100644 --- a/src/backend/distributed/deparser/deparse_role_stmts.c +++ b/src/backend/distributed/deparser/deparse_role_stmts.c @@ -400,7 +400,6 @@ DeparseGrantRoleStmt(Node *node) static void AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt) { -#if PG_VERSION_NUM >= PG_VERSION_16 if (!stmt->is_grant) { DefElem *opt = NULL; @@ -423,12 +422,6 @@ AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt) } } } -#else - if (!stmt->is_grant && stmt->admin_opt) - { - appendStringInfo(buf, "ADMIN OPTION FOR "); - } -#endif } @@ -437,7 +430,6 @@ AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt) { if (stmt->is_grant) { -#if PG_VERSION_NUM >= PG_VERSION_16 int opt_count = 0; DefElem *opt = NULL; foreach_declared_ptr(opt, stmt->opt) @@ -463,12 +455,6 @@ AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt) } } } -#else - if (stmt->admin_opt) - { - appendStringInfo(buf, " WITH ADMIN OPTION"); - } -#endif } } diff --git a/src/backend/distributed/deparser/ruleutils_15.c b/src/backend/distributed/deparser/ruleutils_15.c deleted file mode 100644 index 40c3192b3..000000000 --- a/src/backend/distributed/deparser/ruleutils_15.c +++ /dev/null @@ -1,8960 +0,0 @@ -/*------------------------------------------------------------------------- - * - * ruleutils_15.c - * Functions to convert stored expressions/querytrees back to - * source text - * - * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/distributed/deparser/ruleutils_15.c - * - * This needs to be closely in sync with the core code. 
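The wholesale deletion of ruleutils_15.c works because each pinned ruleutils copy compiles itself out on other major versions: the entire 8960-line file sits behind the file-wide guard visible a few lines below, so once no supported build defines a PG15 version number, the file is dead code everywhere:

    #if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16)
    /* ...the whole PG15 deparser... */
    #endif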
- *------------------------------------------------------------------------- - */ -#include "pg_version_constants.h" - -#include "pg_config.h" - -#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16) - -#include "postgres.h" - -#include -#include -#include - -#include "access/amapi.h" -#include "access/htup_details.h" -#include "access/relation.h" -#include "access/sysattr.h" -#include "access/table.h" -#include "catalog/pg_aggregate.h" -#include "catalog/pg_am.h" -#include "catalog/pg_authid.h" -#include "catalog/pg_collation.h" -#include "catalog/pg_constraint.h" -#include "catalog/pg_depend.h" -#include "catalog/pg_extension.h" -#include "catalog/pg_foreign_data_wrapper.h" -#include "catalog/pg_language.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_operator.h" -#include "catalog/pg_partitioned_table.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_statistic_ext.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_type.h" -#include "commands/defrem.h" -#include "commands/extension.h" -#include "commands/tablespace.h" -#include "common/keywords.h" -#include "distributed/citus_nodefuncs.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/multi_router_planner.h" -#include "distributed/namespace_utils.h" -#include "executor/spi.h" -#include "foreign/foreign.h" -#include "funcapi.h" -#include "mb/pg_wchar.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/pathnodes.h" -#include "optimizer/optimizer.h" -#include "parser/parse_node.h" -#include "parser/parse_agg.h" -#include "parser/parse_func.h" -#include "parser/parse_oper.h" -#include "parser/parse_relation.h" -#include "parser/parser.h" -#include "parser/parsetree.h" -#include "rewrite/rewriteHandler.h" -#include "rewrite/rewriteManip.h" -#include "rewrite/rewriteSupport.h" -#include "utils/array.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/hsearch.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/ruleutils.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" -#include "utils/typcache.h" -#include "utils/varlena.h" -#include "utils/xml.h" - - -/* ---------- - * Pretty formatting constants - * ---------- - */ - -/* Indent counts */ -#define PRETTYINDENT_STD 8 -#define PRETTYINDENT_JOIN 4 -#define PRETTYINDENT_VAR 4 - -#define PRETTYINDENT_LIMIT 40 /* wrap limit */ - -/* Pretty flags */ -#define PRETTYFLAG_PAREN 0x0001 -#define PRETTYFLAG_INDENT 0x0002 - -/* Default line length for pretty-print wrapping: 0 means wrap always */ -#define WRAP_COLUMN_DEFAULT 0 - -/* macros to test if pretty action needed */ -#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN) -#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT) - - -/* ---------- - * Local data types - * ---------- - */ - -/* Context info needed for invoking a recursive querytree display routine */ -typedef struct -{ - StringInfo buf; /* output buffer to append to */ - List *namespaces; /* List of deparse_namespace nodes */ - TupleDesc resultDesc; /* if top level of a view, the view's tupdesc */ - List *targetList; /* Current query level's SELECT targetlist */ - List *windowClause; /* Current query level's WINDOW clause */ - int prettyFlags; /* enabling of pretty-print functions */ - int wrapColumn; /* max line length, or -1 for no limit */ - int indentLevel; /* current indent level for prettyprint */ - bool varprefix; /* true to print prefixes on Vars */ - Oid distrelid; /* 
the distributed table being modified, if valid */ - int64 shardid; /* a distributed table's shardid, if positive */ - bool colNamesVisible; /* do we care about output column names? */ - bool inGroupBy; /* deparsing GROUP BY clause? */ - bool varInOrderBy; /* deparsing simple Var in ORDER BY? */ - Bitmapset *appendparents; /* if not null, map child Vars of these relids - * back to the parent rel */ -} deparse_context; - -/* - * Each level of query context around a subtree needs a level of Var namespace. - * A Var having varlevelsup=N refers to the N'th item (counting from 0) in - * the current context's namespaces list. - * - * The rangetable is the list of actual RTEs from the query tree, and the - * cte list is the list of actual CTEs. - * - * rtable_names holds the alias name to be used for each RTE (either a C - * string, or NULL for nameless RTEs such as unnamed joins). - * rtable_columns holds the column alias names to be used for each RTE. - * - * In some cases we need to make names of merged JOIN USING columns unique - * across the whole query, not only per-RTE. If so, unique_using is true - * and using_names is a list of C strings representing names already assigned - * to USING columns. - * - * When deparsing plan trees, there is always just a single item in the - * deparse_namespace list (since a plan tree never contains Vars with - * varlevelsup > 0). We store the PlanState node that is the immediate - * parent of the expression to be deparsed, as well as a list of that - * PlanState's ancestors. In addition, we store its outer and inner subplan - * state nodes, as well as their plan nodes' targetlists, and the index tlist - * if the current plan node might contain INDEX_VAR Vars. (These fields could - * be derived on-the-fly from the current PlanState, but it seems notationally - * clearer to set them up as separate fields.) - */ -typedef struct -{ - List *rtable; /* List of RangeTblEntry nodes */ - List *rtable_names; /* Parallel list of names for RTEs */ - List *rtable_columns; /* Parallel list of deparse_columns structs */ - List *subplans; /* List of Plan trees for SubPlans */ - List *ctes; /* List of CommonTableExpr nodes */ - AppendRelInfo **appendrels; /* Array of AppendRelInfo nodes, or NULL */ - /* Workspace for column alias assignment: */ - bool unique_using; /* Are we making USING names globally unique */ - List *using_names; /* List of assigned names for USING columns */ - /* Remaining fields are used only when deparsing a Plan tree: */ - Plan *plan; /* immediate parent of current expression */ - List *ancestors; /* ancestors of planstate */ - Plan *outer_plan; /* outer subnode, or NULL if none */ - Plan *inner_plan; /* inner subnode, or NULL if none */ - List *outer_tlist; /* referent for OUTER_VAR Vars */ - List *inner_tlist; /* referent for INNER_VAR Vars */ - List *index_tlist; /* referent for INDEX_VAR Vars */ - /* Special namespace representing a function signature: */ - char *funcname; - int numargs; - char **argnames; -} deparse_namespace; - -/* Callback signature for resolve_special_varno() */ -typedef void (*rsv_callback) (Node *node, deparse_context *context, - void *callback_arg); - -/* - * Per-relation data about column alias names. - * - * Selecting aliases is unreasonably complicated because of the need to dump - * rules/views whose underlying tables may have had columns added, deleted, or - * renamed since the query was parsed. We must nonetheless print the rule/view - * in a form that can be reloaded and will produce the same results as before. 
- * - * For each RTE used in the query, we must assign column aliases that are - * unique within that RTE. SQL does not require this of the original query, - * but due to factors such as *-expansion we need to be able to uniquely - * reference every column in a decompiled query. As long as we qualify all - * column references, per-RTE uniqueness is sufficient for that. - * - * However, we can't ensure per-column name uniqueness for unnamed join RTEs, - * since they just inherit column names from their input RTEs, and we can't - * rename the columns at the join level. Most of the time this isn't an issue - * because we don't need to reference the join's output columns as such; we - * can reference the input columns instead. That approach can fail for merged - * JOIN USING columns, however, so when we have one of those in an unnamed - * join, we have to make that column's alias globally unique across the whole - * query to ensure it can be referenced unambiguously. - * - * Another problem is that a JOIN USING clause requires the columns to be - * merged to have the same aliases in both input RTEs, and that no other - * columns in those RTEs or their children conflict with the USING names. - * To handle that, we do USING-column alias assignment in a recursive - * traversal of the query's jointree. When descending through a JOIN with - * USING, we preassign the USING column names to the child columns, overriding - * other rules for column alias assignment. We also mark each RTE with a list - * of all USING column names selected for joins containing that RTE, so that - * when we assign other columns' aliases later, we can avoid conflicts. - * - * Another problem is that if a JOIN's input tables have had columns added or - * deleted since the query was parsed, we must generate a column alias list - * for the join that matches the current set of input columns --- otherwise, a - * change in the number of columns in the left input would throw off matching - * of aliases to columns of the right input. Thus, positions in the printable - * column alias list are not necessarily one-for-one with varattnos of the - * JOIN, so we need a separate new_colnames[] array for printing purposes. - */ -typedef struct -{ - /* - * colnames is an array containing column aliases to use for columns that - * existed when the query was parsed. Dropped columns have NULL entries. - * This array can be directly indexed by varattno to get a Var's name. - * - * Non-NULL entries are guaranteed unique within the RTE, *except* when - * this is for an unnamed JOIN RTE. In that case we merely copy up names - * from the two input RTEs. - * - * During the recursive descent in set_using_names(), forcible assignment - * of a child RTE's column name is represented by pre-setting that element - * of the child's colnames array. So at that stage, NULL entries in this - * array just mean that no name has been preassigned, not necessarily that - * the column is dropped. - */ - int num_cols; /* length of colnames[] array */ - char **colnames; /* array of C strings and NULLs */ - - /* - * new_colnames is an array containing column aliases to use for columns - * that would exist if the query was re-parsed against the current - * definitions of its base tables. This is what to print as the column - * alias list for the RTE. This array does not include dropped columns, - * but it will include columns added since original parsing. Indexes in - * it therefore have little to do with current varattno values. 
As above, - * entries are unique unless this is for an unnamed JOIN RTE. (In such an - * RTE, we never actually print this array, but we must compute it anyway - * for possible use in computing column names of upper joins.) The - * parallel array is_new_col marks which of these columns are new since - * original parsing. Entries with is_new_col false must match the - * non-NULL colnames entries one-for-one. - */ - int num_new_cols; /* length of new_colnames[] array */ - char **new_colnames; /* array of C strings */ - bool *is_new_col; /* array of bool flags */ - - /* This flag tells whether we should actually print a column alias list */ - bool printaliases; - - /* This list has all names used as USING names in joins above this RTE */ - List *parentUsing; /* names assigned to parent merged columns */ - - /* - * If this struct is for a JOIN RTE, we fill these fields during the - * set_using_names() pass to describe its relationship to its child RTEs. - * - * leftattnos and rightattnos are arrays with one entry per existing - * output column of the join (hence, indexable by join varattno). For a - * simple reference to a column of the left child, leftattnos[i] is the - * child RTE's attno and rightattnos[i] is zero; and conversely for a - * column of the right child. But for merged columns produced by JOIN - * USING/NATURAL JOIN, both leftattnos[i] and rightattnos[i] are nonzero. - * Also, if the column has been dropped, both are zero. - * - * If it's a JOIN USING, usingNames holds the alias names selected for the - * merged columns (these might be different from the original USING list, - * if we had to modify names to achieve uniqueness). - */ - int leftrti; /* rangetable index of left child */ - int rightrti; /* rangetable index of right child */ - int *leftattnos; /* left-child varattnos of join cols, or 0 */ - int *rightattnos; /* right-child varattnos of join cols, or 0 */ - List *usingNames; /* names assigned to merged columns */ -} deparse_columns; - -/* This macro is analogous to rt_fetch(), but for deparse_columns structs */ -#define deparse_columns_fetch(rangetable_index, dpns) \ - ((deparse_columns *) list_nth((dpns)->rtable_columns, (rangetable_index)-1)) - -/* - * Entry in set_rtable_names' hash table - */ -typedef struct -{ - char name[NAMEDATALEN]; /* Hash key --- must be first */ - int counter; /* Largest addition used so far for name */ -} NameHashEntry; - - -/* ---------- - * Local functions - * - * Most of these functions used to use fixed-size buffers to build their - * results. Now, they take an (already initialized) StringInfo object - * as a parameter, and append their text output to its contents. 
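The leftattnos/rightattnos encoding described in this (now deleted) comment block is compact enough to restate as code. A hypothetical helper, not from the file, showing how the four cases are distinguished:

    typedef enum
    {
        JOINCOL_LEFT,       /* plain reference into the left child */
        JOINCOL_RIGHT,      /* plain reference into the right child */
        JOINCOL_MERGED,     /* JOIN USING / NATURAL merged column */
        JOINCOL_DROPPED     /* column dropped since the query was parsed */
    } JoinColKind;

    static JoinColKind
    classify_join_column(const int *leftattnos, const int *rightattnos, int i)
    {
        if (leftattnos[i] != 0 && rightattnos[i] != 0)
            return JOINCOL_MERGED;
        if (leftattnos[i] != 0)
            return JOINCOL_LEFT;
        if (rightattnos[i] != 0)
            return JOINCOL_RIGHT;
        return JOINCOL_DROPPED;
    }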
- * ---------- - */ -static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, - Bitmapset *rels_used); -static void set_deparse_for_query(deparse_namespace *dpns, Query *query, - List *parent_namespaces); -static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode); -static void set_using_names(deparse_namespace *dpns, Node *jtnode, - List *parentUsing); -static void set_relation_column_names(deparse_namespace *dpns, - RangeTblEntry *rte, - deparse_columns *colinfo); -static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo); -static bool colname_is_unique(const char *colname, deparse_namespace *dpns, - deparse_columns *colinfo); -static char *make_colname_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo); -static void expand_colnames_array_to(deparse_columns *colinfo, int n); -static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, - deparse_columns *colinfo); -static char *get_rtable_name(int rtindex, deparse_context *context); -static void set_deparse_plan(deparse_namespace *dpns, Plan *plan); -static Plan *find_recursive_union(deparse_namespace *dpns, - WorkTableScan *wtscan); -static void push_child_plan(deparse_namespace *dpns, Plan *plan, - deparse_namespace *save_dpns); -static void pop_child_plan(deparse_namespace *dpns, - deparse_namespace *save_dpns); -static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, - deparse_namespace *save_dpns); -static void pop_ancestor_plan(deparse_namespace *dpns, - deparse_namespace *save_dpns); -static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, - TupleDesc resultDesc, bool colNamesVisible, - int prettyFlags, int wrapColumn, int startIndent); -static void get_query_def_extended(Query *query, StringInfo buf, - List *parentnamespace, Oid distrelid, int64 shardid, - TupleDesc resultDesc, bool colNamesVisible, - int prettyFlags, int wrapColumn, - int startIndent); -static void get_values_def(List *values_lists, deparse_context *context); -static void get_with_clause(Query *query, deparse_context *context); -static void get_select_query_def(Query *query, deparse_context *context); -static void get_insert_query_def(Query *query, deparse_context *context); -static void get_update_query_def(Query *query, deparse_context *context); -static void get_update_query_targetlist_def(Query *query, List *targetList, - deparse_context *context, - RangeTblEntry *rte); -static void get_delete_query_def(Query *query, deparse_context *context); -static void get_merge_query_def(Query *query, deparse_context *context); -static void get_utility_query_def(Query *query, deparse_context *context); -static void get_basic_select_query(Query *query, deparse_context *context); -static void get_target_list(List *targetList, deparse_context *context); -static void get_setop_query(Node *setOp, Query *query, - deparse_context *context); -static Node *get_rule_sortgroupclause(Index ref, List *tlist, - bool force_colno, - deparse_context *context); -static void get_rule_groupingset(GroupingSet *gset, List *targetlist, - bool omit_parens, deparse_context *context); -static void get_rule_orderby(List *orderList, List *targetList, - bool force_colno, deparse_context *context); -static void get_rule_windowclause(Query *query, deparse_context *context); -static void get_rule_windowspec(WindowClause *wc, List *targetList, - deparse_context *context); -static char *get_variable(Var *var, int levelsup, bool istoplevel, 
- deparse_context *context); -static void get_special_variable(Node *node, deparse_context *context, - void *callback_arg); -static void resolve_special_varno(Node *node, deparse_context *context, - rsv_callback callback, void *callback_arg); -static Node *find_param_referent(Param *param, deparse_context *context, - deparse_namespace **dpns_p, ListCell **ancestor_cell_p); -static void get_parameter(Param *param, deparse_context *context); -static const char *get_simple_binary_op_name(OpExpr *expr); -static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags); -static void appendContextKeyword(deparse_context *context, const char *str, - int indentBefore, int indentAfter, int indentPlus); -static void removeStringInfoSpaces(StringInfo str); -static void get_rule_expr(Node *node, deparse_context *context, - bool showimplicit); -static void get_rule_expr_toplevel(Node *node, deparse_context *context, - bool showimplicit); -static void get_rule_list_toplevel(List *lst, deparse_context *context, - bool showimplicit); -static void get_rule_expr_funccall(Node *node, deparse_context *context, - bool showimplicit); -static bool looks_like_function(Node *node); -static void get_oper_expr(OpExpr *expr, deparse_context *context); -static void get_func_expr(FuncExpr *expr, deparse_context *context, - bool showimplicit); -static void get_proc_expr(CallStmt *stmt, deparse_context *context, - bool showimplicit); -static void get_agg_expr(Aggref *aggref, deparse_context *context, - Aggref *original_aggref); -static void get_agg_combine_expr(Node *node, deparse_context *context, - void *callback_arg); -static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context); -static bool get_func_sql_syntax(FuncExpr *expr, deparse_context *context); -static void get_coercion_expr(Node *arg, deparse_context *context, - Oid resulttype, int32 resulttypmod, - Node *parentNode); -static void get_const_expr(Const *constval, deparse_context *context, - int showtype); -static void get_const_collation(Const *constval, deparse_context *context); -static void simple_quote_literal(StringInfo buf, const char *val); -static void get_sublink_expr(SubLink *sublink, deparse_context *context); -static void get_tablefunc(TableFunc *tf, deparse_context *context, - bool showimplicit); -static void get_from_clause(Query *query, const char *prefix, - deparse_context *context); -static void get_from_clause_item(Node *jtnode, Query *query, - deparse_context *context); -static void get_column_alias_list(deparse_columns *colinfo, - deparse_context *context); -static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, - deparse_columns *colinfo, - deparse_context *context); -static void get_tablesample_def(TableSampleClause *tablesample, - deparse_context *context); -static void get_opclass_name(Oid opclass, Oid actual_datatype, - StringInfo buf); -static Node *processIndirection(Node *node, deparse_context *context); -static void printSubscripts(SubscriptingRef *aref, deparse_context *context); -static char *get_relation_name(Oid relid); -static char *generate_relation_or_shard_name(Oid relid, Oid distrelid, - int64 shardid, List *namespaces); -static char *generate_rte_shard_name(RangeTblEntry *rangeTableEntry); -static char *generate_fragment_name(char *schemaName, char *tableName); -static char *generate_function_name(Oid funcid, int nargs, - List *argnames, Oid *argtypes, - bool has_variadic, bool *use_variadic_p, - bool inGroupBy); -static List *get_insert_column_names_list(List *targetList, StringInfo 
buf, deparse_context *context, RangeTblEntry *rte); - -#define only_marker(rte) ((rte)->inh ? "" : "ONLY ") - - - -/* - * pg_get_query_def parses back one query tree, and outputs the resulting query - * string into given buffer. - */ -void -pg_get_query_def(Query *query, StringInfo buffer) -{ - get_query_def(query, buffer, NIL, NULL, false, 0, WRAP_COLUMN_DEFAULT, 0); -} - -/* - * get_merged_argument_list merges both the IN and OUT arguments lists into one and - * also eliminates the INOUT duplicates(present in both the lists). After merging both - * the lists, it returns all the named-arguments in a list(mergedNamedArgList) along - * with their types(mergedNamedArgTypes), final argument list(mergedArgumentList), and - * the total number of arguments(totalArguments). - */ -bool -get_merged_argument_list(CallStmt *stmt, List **mergedNamedArgList, - Oid **mergedNamedArgTypes, - List **mergedArgumentList, - int *totalArguments) -{ - - Oid functionOid = stmt->funcexpr->funcid; - List *namedArgList = NIL; - List *finalArgumentList = NIL; - Oid *finalArgTypes; - Oid *argTypes = NULL; - char *argModes = NULL; - char **argNames = NULL; - int argIndex = 0; - - HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionOid)); - if (!HeapTupleIsValid(proctup)) - { - elog(ERROR, "cache lookup failed for function %u", functionOid); - } - - int defArgs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes); - ReleaseSysCache(proctup); - - if (argModes == NULL) - { - /* No OUT arguments */ - return false; - } - - /* - * Passed arguments Includes IN, OUT, INOUT (in both the lists) and VARIADIC arguments, - * which means INOUT arguments are double counted. - */ - int numberOfArgs = list_length(stmt->funcexpr->args) + list_length(stmt->outargs); - int totalInoutArgs = 0; - - /* Let's count INOUT arguments from the defined number of arguments */ - for (argIndex=0; argIndex < defArgs; ++argIndex) - { - if (argModes[argIndex] == PROARGMODE_INOUT) - totalInoutArgs++; - } - - /* Remove the duplicate INOUT counting */ - numberOfArgs = numberOfArgs - totalInoutArgs; - finalArgTypes = palloc0(sizeof(Oid) * numberOfArgs); - - ListCell *inArgCell = list_head(stmt->funcexpr->args); - ListCell *outArgCell = list_head(stmt->outargs); - - for (argIndex=0; argIndex < numberOfArgs; ++argIndex) - { - switch (argModes[argIndex]) - { - case PROARGMODE_IN: - case PROARGMODE_VARIADIC: - { - Node *arg = (Node *) lfirst(inArgCell); - - if (IsA(arg, NamedArgExpr)) - namedArgList = lappend(namedArgList, ((NamedArgExpr *) arg)->name); - finalArgTypes[argIndex] = exprType(arg); - finalArgumentList = lappend(finalArgumentList, arg); - inArgCell = lnext(stmt->funcexpr->args, inArgCell); - break; - } - - case PROARGMODE_OUT: - { - Node *arg = (Node *) lfirst(outArgCell); - - if (IsA(arg, NamedArgExpr)) - namedArgList = lappend(namedArgList, ((NamedArgExpr *) arg)->name); - finalArgTypes[argIndex] = exprType(arg); - finalArgumentList = lappend(finalArgumentList, arg); - outArgCell = lnext(stmt->outargs, outArgCell); - break; - } - - case PROARGMODE_INOUT: - { - Node *arg = (Node *) lfirst(inArgCell); - - if (IsA(arg, NamedArgExpr)) - namedArgList = lappend(namedArgList, ((NamedArgExpr *) arg)->name); - finalArgTypes[argIndex] = exprType(arg); - finalArgumentList = lappend(finalArgumentList, arg); - inArgCell = lnext(stmt->funcexpr->args, inArgCell); - outArgCell = lnext(stmt->outargs, outArgCell); - break; - } - - case PROARGMODE_TABLE: - default: - { - elog(ERROR, "Unhandled procedure argument mode[%d]", 
argModes[argIndex]); - break; - } - } - } - - /* - * After eliminating INOUT duplicates and merging OUT arguments, we now - * have the final list of arguments. - */ - if (defArgs != list_length(finalArgumentList)) - { - elog(ERROR, "Insufficient number of args passed[%d] for function[%s]", - list_length(finalArgumentList), - get_func_name(functionOid)); - } - - if (list_length(finalArgumentList) > FUNC_MAX_ARGS) - { - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg("too many arguments[%d] for function[%s]", - list_length(finalArgumentList), - get_func_name(functionOid)))); - } - - *mergedNamedArgList = namedArgList; - *mergedNamedArgTypes = finalArgTypes; - *mergedArgumentList = finalArgumentList; - *totalArguments = numberOfArgs; - - return true; -} - -/* - * pg_get_rule_expr deparses an expression and returns the result as a string. - */ -char * -pg_get_rule_expr(Node *expression) -{ - bool showImplicitCasts = true; - deparse_context context; - StringInfo buffer = makeStringInfo(); - - /* - * Set search_path to NIL so that all objects outside of pg_catalog will be - * schema-prefixed. pg_catalog will be added automatically when we call - * PushEmptySearchPath(), since we set addCatalog to true; - */ - int saveNestLevel = PushEmptySearchPath(); - - context.buf = buffer; - context.namespaces = NIL; - context.resultDesc = NULL; - context.targetList = NIL; - context.windowClause = NIL; - context.varprefix = false; - context.prettyFlags = 0; - context.wrapColumn = WRAP_COLUMN_DEFAULT; - context.indentLevel = 0; - context.colNamesVisible = true; - context.inGroupBy = false; - context.varInOrderBy = false; - context.distrelid = InvalidOid; - context.shardid = INVALID_SHARD_ID; - - get_rule_expr(expression, &context, showImplicitCasts); - - /* revert back to original search_path */ - PopEmptySearchPath(saveNestLevel); - - return buffer->data; -} - -/* - * set_rtable_names: select RTE aliases to be used in printing a query - * - * We fill in dpns->rtable_names with a list of names that is one-for-one with - * the already-filled dpns->rtable list. Each RTE name is unique among those - * in the new namespace plus any ancestor namespaces listed in - * parent_namespaces. - * - * If rels_used isn't NULL, only RTE indexes listed in it are given aliases. - * - * Note that this function is only concerned with relation names, not column - * names. - */ -static void -set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, - Bitmapset *rels_used) -{ - HASHCTL hash_ctl; - HTAB *names_hash; - NameHashEntry *hentry; - bool found; - int rtindex; - ListCell *lc; - - dpns->rtable_names = NIL; - /* nothing more to do if empty rtable */ - if (dpns->rtable == NIL) - return; - - /* - * We use a hash table to hold known names, so that this process is O(N) - * not O(N^2) for N names. 
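 *
 * As a minimal sketch (buffer size and control flow simplified; not part
 * of the original file), the collision handling below is equivalent to:
 *
 *     int   counter = 0;
 *     char  modname[NAMEDATALEN + 16];
 *
 *     do
 *         snprintf(modname, sizeof(modname), "%s_%d", refname, ++counter);
 *     while (hash_search(names_hash, modname, HASH_FIND, NULL) != NULL);
 *
 * so a second "t1" becomes "t1_1", a third "t1_2", and so on; the real
 * loop additionally clips overlong names with pg_mbcliplen() so that the
 * digits always fit within NAMEDATALEN.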
- */ - hash_ctl.keysize = NAMEDATALEN; - hash_ctl.entrysize = sizeof(NameHashEntry); - hash_ctl.hcxt = CurrentMemoryContext; - names_hash = hash_create("set_rtable_names names", - list_length(dpns->rtable), - &hash_ctl, - HASH_ELEM | HASH_STRINGS | HASH_CONTEXT); - - /* Preload the hash table with names appearing in parent_namespaces */ - foreach(lc, parent_namespaces) - { - deparse_namespace *olddpns = (deparse_namespace *) lfirst(lc); - ListCell *lc2; - - foreach(lc2, olddpns->rtable_names) - { - char *oldname = (char *) lfirst(lc2); - - if (oldname == NULL) - continue; - hentry = (NameHashEntry *) hash_search(names_hash, - oldname, - HASH_ENTER, - &found); - /* we do not complain about duplicate names in parent namespaces */ - hentry->counter = 0; - } - } - - /* Now we can scan the rtable */ - rtindex = 1; - foreach(lc, dpns->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - char *refname; - - /* Just in case this takes an unreasonable amount of time ... */ - CHECK_FOR_INTERRUPTS(); - - if (rels_used && !bms_is_member(rtindex, rels_used)) - { - /* Ignore unreferenced RTE */ - refname = NULL; - } - else if (rte->alias) - { - /* If RTE has a user-defined alias, prefer that */ - refname = rte->alias->aliasname; - } - else if (rte->rtekind == RTE_RELATION) - { - /* Use the current actual name of the relation */ - refname = get_rel_name(rte->relid); - } - else if (rte->rtekind == RTE_JOIN) - { - /* Unnamed join has no refname */ - refname = NULL; - } - else - { - /* Otherwise use whatever the parser assigned */ - refname = rte->eref->aliasname; - } - - /* - * If the selected name isn't unique, append digits to make it so, and - * make a new hash entry for it once we've got a unique name. For a - * very long input name, we might have to truncate to stay within - * NAMEDATALEN. - */ - if (refname) - { - hentry = (NameHashEntry *) hash_search(names_hash, - refname, - HASH_ENTER, - &found); - if (found) - { - /* Name already in use, must choose a new one */ - int refnamelen = strlen(refname); - char *modname = (char *) palloc(refnamelen + 16); - NameHashEntry *hentry2; - - do - { - hentry->counter++; - for (;;) - { - memcpy(modname, refname, refnamelen); - sprintf(modname + refnamelen, "_%d", hentry->counter); - if (strlen(modname) < NAMEDATALEN) - break; - /* drop chars from refname to keep all the digits */ - refnamelen = pg_mbcliplen(refname, refnamelen, - refnamelen - 1); - } - hentry2 = (NameHashEntry *) hash_search(names_hash, - modname, - HASH_ENTER, - &found); - } while (found); - hentry2->counter = 0; /* init new hash entry */ - refname = modname; - } - else - { - /* Name not previously used, need only initialize hentry */ - hentry->counter = 0; - } - } - - dpns->rtable_names = lappend(dpns->rtable_names, refname); - rtindex++; - } - - hash_destroy(names_hash); -} - -/* - * set_deparse_for_query: set up deparse_namespace for deparsing a Query tree - * - * For convenience, this is defined to initialize the deparse_namespace struct - * from scratch. 
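 *
 * Typical call pattern (illustrative), matching the use in
 * get_query_def_extended() further down:
 *
 *     deparse_namespace dpns;
 *
 *     set_deparse_for_query(&dpns, query, parent_namespaces);
 *
 * after which dpns.rtable_names and dpns.rtable_columns carry one entry
 * per RTE of query->rtable.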
- */ -static void -set_deparse_for_query(deparse_namespace *dpns, Query *query, - List *parent_namespaces) -{ - ListCell *lc; - ListCell *lc2; - - /* Initialize *dpns and fill rtable/ctes links */ - memset(dpns, 0, sizeof(deparse_namespace)); - dpns->rtable = query->rtable; - dpns->subplans = NIL; - dpns->ctes = query->cteList; - dpns->appendrels = NULL; - - /* Assign a unique relation alias to each RTE */ - set_rtable_names(dpns, parent_namespaces, NULL); - - /* Initialize dpns->rtable_columns to contain zeroed structs */ - dpns->rtable_columns = NIL; - while (list_length(dpns->rtable_columns) < list_length(dpns->rtable)) - dpns->rtable_columns = lappend(dpns->rtable_columns, - palloc0(sizeof(deparse_columns))); - - /* If it's a utility query, it won't have a jointree */ - if (query->jointree) - { - /* Detect whether global uniqueness of USING names is needed */ - dpns->unique_using = - has_dangerous_join_using(dpns, (Node *) query->jointree); - - /* - * Select names for columns merged by USING, via a recursive pass over - * the query jointree. - */ - set_using_names(dpns, (Node *) query->jointree, NIL); - } - - /* - * Now assign remaining column aliases for each RTE. We do this in a - * linear scan of the rtable, so as to process RTEs whether or not they - * are in the jointree (we mustn't miss NEW.*, INSERT target relations, - * etc). JOIN RTEs must be processed after their children, but this is - * okay because they appear later in the rtable list than their children - * (cf Asserts in identify_join_columns()). - */ - forboth(lc, dpns->rtable, lc2, dpns->rtable_columns) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - deparse_columns *colinfo = (deparse_columns *) lfirst(lc2); - - if (rte->rtekind == RTE_JOIN) - set_join_column_names(dpns, rte, colinfo); - else - set_relation_column_names(dpns, rte, colinfo); - } -} - -/* - * has_dangerous_join_using: search jointree for unnamed JOIN USING - * - * Merged columns of a JOIN USING may act differently from either of the input - * columns, either because they are merged with COALESCE (in a FULL JOIN) or - * because an implicit coercion of the underlying input column is required. - * In such a case the column must be referenced as a column of the JOIN not as - * a column of either input. And this is problematic if the join is unnamed - * (alias-less): we cannot qualify the column's name with an RTE name, since - * there is none. (Forcibly assigning an alias to the join is not a solution, - * since that will prevent legal references to tables below the join.) - * To ensure that every column in the query is unambiguously referenceable, - * we must assign such merged columns names that are globally unique across - * the whole query, aliasing other columns out of the way as necessary. - * - * Because the ensuing re-aliasing is fairly damaging to the readability of - * the query, we don't do this unless we have to. So, we must pre-scan - * the join tree to see if we have to, before starting set_using_names(). - */ -static bool -has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode) -{ - if (IsA(jtnode, RangeTblRef)) - { - /* nothing to do here */ - } - else if (IsA(jtnode, FromExpr)) - { - FromExpr *f = (FromExpr *) jtnode; - ListCell *lc; - - foreach(lc, f->fromlist) - { - if (has_dangerous_join_using(dpns, (Node *) lfirst(lc))) - return true; - } - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - - /* Is it an unnamed JOIN with USING? 
*/ - if (j->alias == NULL && j->usingClause) - { - /* - * Yes, so check each join alias var to see if any of them are not - * simple references to underlying columns. If so, we have a - * dangerous situation and must pick unique aliases. - */ - RangeTblEntry *jrte = rt_fetch(j->rtindex, dpns->rtable); - - /* We need only examine the merged columns */ - for (int i = 0; i < jrte->joinmergedcols; i++) - { - Node *aliasvar = list_nth(jrte->joinaliasvars, i); - - if (!IsA(aliasvar, Var)) - return true; - } - } - - /* Nope, but inspect children */ - if (has_dangerous_join_using(dpns, j->larg)) - return true; - if (has_dangerous_join_using(dpns, j->rarg)) - return true; - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); - return false; -} - -/* - * set_using_names: select column aliases to be used for merged USING columns - * - * We do this during a recursive descent of the query jointree. - * dpns->unique_using must already be set to determine the global strategy. - * - * Column alias info is saved in the dpns->rtable_columns list, which is - * assumed to be filled with pre-zeroed deparse_columns structs. - * - * parentUsing is a list of all USING aliases assigned in parent joins of - * the current jointree node. (The passed-in list must not be modified.) - */ -static void -set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing) -{ - if (IsA(jtnode, RangeTblRef)) - { - /* nothing to do now */ - } - else if (IsA(jtnode, FromExpr)) - { - FromExpr *f = (FromExpr *) jtnode; - ListCell *lc; - - foreach(lc, f->fromlist) - set_using_names(dpns, (Node *) lfirst(lc), parentUsing); - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - RangeTblEntry *rte = rt_fetch(j->rtindex, dpns->rtable); - deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); - int *leftattnos; - int *rightattnos; - deparse_columns *leftcolinfo; - deparse_columns *rightcolinfo; - int i; - ListCell *lc; - - /* Get info about the shape of the join */ - identify_join_columns(j, rte, colinfo); - leftattnos = colinfo->leftattnos; - rightattnos = colinfo->rightattnos; - - /* Look up the not-yet-filled-in child deparse_columns structs */ - leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); - rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); - - /* - * If this join is unnamed, then we cannot substitute new aliases at - * this level, so any name requirements pushed down to here must be - * pushed down again to the children. - */ - if (rte->alias == NULL) - { - for (i = 0; i < colinfo->num_cols; i++) - { - char *colname = colinfo->colnames[i]; - - if (colname == NULL) - continue; - - /* Push down to left column, unless it's a system column */ - if (leftattnos[i] > 0) - { - expand_colnames_array_to(leftcolinfo, leftattnos[i]); - leftcolinfo->colnames[leftattnos[i] - 1] = colname; - } - - /* Same on the righthand side */ - if (rightattnos[i] > 0) - { - expand_colnames_array_to(rightcolinfo, rightattnos[i]); - rightcolinfo->colnames[rightattnos[i] - 1] = colname; - } - } - } - - /* - * If there's a USING clause, select the USING column names and push - * those names down to the children. We have two strategies: - * - * If dpns->unique_using is true, we force all USING names to be - * unique across the whole query level. In principle we'd only need - * the names of dangerous USING columns to be globally unique, but to - * safely assign all USING names in a single pass, we have to enforce - * the same uniqueness rule for all of them. 
However, if a USING - * column's name has been pushed down from the parent, we should use - * it as-is rather than making a uniqueness adjustment. This is - * necessary when we're at an unnamed join, and it creates no risk of - * ambiguity. Also, if there's a user-written output alias for a - * merged column, we prefer to use that rather than the input name; - * this simplifies the logic and seems likely to lead to less aliasing - * overall. - * - * If dpns->unique_using is false, we only need USING names to be - * unique within their own join RTE. We still need to honor - * pushed-down names, though. - * - * Though significantly different in results, these two strategies are - * implemented by the same code, with only the difference of whether - * to put assigned names into dpns->using_names. - */ - if (j->usingClause) - { - /* Copy the input parentUsing list so we don't modify it */ - parentUsing = list_copy(parentUsing); - - /* USING names must correspond to the first join output columns */ - expand_colnames_array_to(colinfo, list_length(j->usingClause)); - i = 0; - foreach(lc, j->usingClause) - { - char *colname = strVal(lfirst(lc)); - - /* Assert it's a merged column */ - Assert(leftattnos[i] != 0 && rightattnos[i] != 0); - - /* Adopt passed-down name if any, else select unique name */ - if (colinfo->colnames[i] != NULL) - colname = colinfo->colnames[i]; - else - { - /* Prefer user-written output alias if any */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - /* Make it appropriately unique */ - colname = make_colname_unique(colname, dpns, colinfo); - if (dpns->unique_using) - dpns->using_names = lappend(dpns->using_names, - colname); - /* Save it as output column name, too */ - colinfo->colnames[i] = colname; - } - - /* Remember selected names for use later */ - colinfo->usingNames = lappend(colinfo->usingNames, colname); - parentUsing = lappend(parentUsing, colname); - - /* Push down to left column, unless it's a system column */ - if (leftattnos[i] > 0) - { - expand_colnames_array_to(leftcolinfo, leftattnos[i]); - leftcolinfo->colnames[leftattnos[i] - 1] = colname; - } - - /* Same on the righthand side */ - if (rightattnos[i] > 0) - { - expand_colnames_array_to(rightcolinfo, rightattnos[i]); - rightcolinfo->colnames[rightattnos[i] - 1] = colname; - } - - i++; - } - } - - /* Mark child deparse_columns structs with correct parentUsing info */ - leftcolinfo->parentUsing = parentUsing; - rightcolinfo->parentUsing = parentUsing; - - /* Now recursively assign USING column names in children */ - set_using_names(dpns, j->larg, parentUsing); - set_using_names(dpns, j->rarg, parentUsing); - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); -} - -/* - * set_relation_column_names: select column aliases for a non-join RTE - * - * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. - * If any colnames entries are already filled in, those override local - * choices. - */ -static void -set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo) -{ - int ncolumns; - char **real_colnames; - bool changed_any; - bool has_anonymous; - int noldcolumns; - int i; - int j; - - /* - * Construct an array of the current "real" column names of the RTE. - * real_colnames[] will be indexed by physical column number, with NULL - * entries for dropped columns. 
- */ - if (rte->rtekind == RTE_RELATION || - GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - /* Relation --- look to the system catalogs for up-to-date info */ - Relation rel; - TupleDesc tupdesc; - - rel = relation_open(rte->relid, AccessShareLock); - tupdesc = RelationGetDescr(rel); - - ncolumns = tupdesc->natts; - real_colnames = (char **) palloc(ncolumns * sizeof(char *)); - - for (i = 0; i < ncolumns; i++) - { - Form_pg_attribute attr = TupleDescAttr(tupdesc, i); - - if (attr->attisdropped) - real_colnames[i] = NULL; - else - real_colnames[i] = pstrdup(NameStr(attr->attname)); - } - relation_close(rel, AccessShareLock); - } - else - { - /* Otherwise get the column names from eref or expandRTE() */ - List *colnames; - ListCell *lc; - - /* - * Functions returning composites have the annoying property that some - * of the composite type's columns might have been dropped since the - * query was parsed. If possible, use expandRTE() to handle that - * case, since it has the tedious logic needed to find out about - * dropped columns. However, if we're explaining a plan, then we - * don't have rte->functions because the planner thinks that won't be - * needed later, and that breaks expandRTE(). So in that case we have - * to rely on rte->eref, which may lead us to report a dropped - * column's old name; that seems close enough for EXPLAIN's purposes. - * - * For non-RELATION, non-FUNCTION RTEs, we can just look at rte->eref, - * which should be sufficiently up-to-date: no other RTE types can - * have columns get dropped from under them after parsing. - */ - if (rte->rtekind == RTE_FUNCTION && rte->functions != NIL) - { - /* Since we're not creating Vars, rtindex etc. don't matter */ - expandRTE(rte, 1, 0, -1, true /* include dropped */ , - &colnames, NULL); - } - else - colnames = rte->eref->colnames; - - ncolumns = list_length(colnames); - real_colnames = (char **) palloc(ncolumns * sizeof(char *)); - - i = 0; - foreach(lc, colnames) - { - /* - * If the column name we find here is an empty string, then it's a - * dropped column, so change to NULL. - */ - char *cname = strVal(lfirst(lc)); - - if (cname[0] == '\0') - cname = NULL; - real_colnames[i] = cname; - i++; - } - } - - /* - * Ensure colinfo->colnames has a slot for each column. (It could be long - * enough already, if we pushed down a name for the last column.) Note: - * it's possible that there are now more columns than there were when the - * query was parsed, ie colnames could be longer than rte->eref->colnames. - * We must assign unique aliases to the new columns too, else there could - * be unresolved conflicts when the view/rule is reloaded. - */ - expand_colnames_array_to(colinfo, ncolumns); - Assert(colinfo->num_cols == ncolumns); - - /* - * Make sufficiently large new_colnames and is_new_col arrays, too. - * - * Note: because we leave colinfo->num_new_cols zero until after the loop, - * colname_is_unique will not consult that array, which is fine because it - * would only be duplicate effort. - */ - colinfo->new_colnames = (char **) palloc(ncolumns * sizeof(char *)); - colinfo->is_new_col = (bool *) palloc(ncolumns * sizeof(bool)); - - /* - * Scan the columns, select a unique alias for each one, and store it in - * colinfo->colnames and colinfo->new_colnames. The former array has NULL - * entries for dropped columns, the latter omits them. Also mark - * new_colnames entries as to whether they are new since parse time; this - * is the case for entries beyond the length of rte->eref->colnames. 
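 *
 * Worked example (illustrative): if the query was parsed while the table
 * had columns (a, b) and a column c was added afterwards, then
 * ncolumns = 3 and noldcolumns = 2, so the loop below produces
 * new_colnames = {"a", "b", "c"} with is_new_col = {false, false, true};
 * only "c" is treated as newly added.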
- */ - noldcolumns = list_length(rte->eref->colnames); - changed_any = false; - has_anonymous = false; - j = 0; - for (i = 0; i < ncolumns; i++) - { - char *real_colname = real_colnames[i]; - char *colname = colinfo->colnames[i]; - - /* Skip dropped columns */ - if (real_colname == NULL) - { - Assert(colname == NULL); /* colnames[i] is already NULL */ - continue; - } - - /* If alias already assigned, that's what to use */ - if (colname == NULL) - { - /* If user wrote an alias, prefer that over real column name */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - else - colname = real_colname; - - /* Unique-ify and insert into colinfo */ - colname = make_colname_unique(colname, dpns, colinfo); - - colinfo->colnames[i] = colname; - } - - /* Put names of non-dropped columns in new_colnames[] too */ - colinfo->new_colnames[j] = colname; - /* And mark them as new or not */ - colinfo->is_new_col[j] = (i >= noldcolumns); - j++; - - /* Remember if any assigned aliases differ from "real" name */ - if (!changed_any && strcmp(colname, real_colname) != 0) - changed_any = true; - - /* - * Remember if there is a reference to an anonymous column as named by - * char * FigureColname(Node *node) - */ - if (!has_anonymous && strcmp(real_colname, "?column?") == 0) - has_anonymous = true; - } - - /* - * Set correct length for new_colnames[] array. (Note: if columns have - * been added, colinfo->num_cols includes them, which is not really quite - * right but is harmless, since any new columns must be at the end where - * they won't affect varattnos of pre-existing columns.) - */ - colinfo->num_new_cols = j; - - /* - * For a relation RTE, we need only print the alias column names if any - * are different from the underlying "real" names. For a function RTE, - * always emit a complete column alias list; this is to protect against - * possible instability of the default column names (eg, from altering - * parameter names). For tablefunc RTEs, we never print aliases, because - * the column names are part of the clause itself. For other RTE types, - * print if we changed anything OR if there were user-written column - * aliases (since the latter would be part of the underlying "reality"). - */ - if (rte->rtekind == RTE_RELATION) - colinfo->printaliases = changed_any; - else if (rte->rtekind == RTE_FUNCTION) - colinfo->printaliases = true; - else if (rte->rtekind == RTE_TABLEFUNC) - colinfo->printaliases = false; - else if (rte->alias && rte->alias->colnames != NIL) - colinfo->printaliases = true; - else - colinfo->printaliases = changed_any || has_anonymous; -} - -/* - * set_join_column_names: select column aliases for a join RTE - * - * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. - * If any colnames entries are already filled in, those override local - * choices. Also, names for USING columns were already chosen by - * set_using_names(). We further expect that column alias selection has been - * completed for both input RTEs. 
- */ -static void -set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo) -{ - deparse_columns *leftcolinfo; - deparse_columns *rightcolinfo; - bool changed_any; - int noldcolumns; - int nnewcolumns; - Bitmapset *leftmerged = NULL; - Bitmapset *rightmerged = NULL; - int i; - int j; - int ic; - int jc; - - /* Look up the previously-filled-in child deparse_columns structs */ - leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); - rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); - - /* - * Ensure colinfo->colnames has a slot for each column. (It could be long - * enough already, if we pushed down a name for the last column.) Note: - * it's possible that one or both inputs now have more columns than there - * were when the query was parsed, but we'll deal with that below. We - * only need entries in colnames for pre-existing columns. - */ - noldcolumns = list_length(rte->eref->colnames); - expand_colnames_array_to(colinfo, noldcolumns); - Assert(colinfo->num_cols == noldcolumns); - - /* - * Scan the join output columns, select an alias for each one, and store - * it in colinfo->colnames. If there are USING columns, set_using_names() - * already selected their names, so we can start the loop at the first - * non-merged column. - */ - changed_any = false; - for (i = list_length(colinfo->usingNames); i < noldcolumns; i++) - { - char *colname = colinfo->colnames[i]; - char *real_colname; - - /* Join column must refer to at least one input column */ - Assert(colinfo->leftattnos[i] != 0 || colinfo->rightattnos[i] != 0); - - /* Get the child column name */ - if (colinfo->leftattnos[i] > 0) - real_colname = leftcolinfo->colnames[colinfo->leftattnos[i] - 1]; - else if (colinfo->rightattnos[i] > 0) - real_colname = rightcolinfo->colnames[colinfo->rightattnos[i] - 1]; - else - { - /* We're joining system columns --- use eref name */ - real_colname = strVal(list_nth(rte->eref->colnames, i)); - } - /* If child col has been dropped, no need to assign a join colname */ - if (real_colname == NULL) - { - colinfo->colnames[i] = NULL; - continue; - } - - /* In an unnamed join, just report child column names as-is */ - if (rte->alias == NULL) - { - colinfo->colnames[i] = real_colname; - continue; - } - - /* If alias already assigned, that's what to use */ - if (colname == NULL) - { - /* If user wrote an alias, prefer that over real column name */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - else - colname = real_colname; - - /* Unique-ify and insert into colinfo */ - colname = make_colname_unique(colname, dpns, colinfo); - - colinfo->colnames[i] = colname; - } - - /* Remember if any assigned aliases differ from "real" name */ - if (!changed_any && strcmp(colname, real_colname) != 0) - changed_any = true; - } - - /* - * Calculate number of columns the join would have if it were re-parsed - * now, and create storage for the new_colnames and is_new_col arrays. - * - * Note: colname_is_unique will be consulting new_colnames[] during the - * loops below, so its not-yet-filled entries must be zeroes. 
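 *
 * For instance (illustrative): joining a three-column child to a
 * two-column child with USING (id) gives
 *
 *     nnewcolumns = 3 + 2 - 1 = 4
 *
 * i.e. the merged "id" column, two non-merged left columns, and one
 * non-merged right column.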
- */ - nnewcolumns = leftcolinfo->num_new_cols + rightcolinfo->num_new_cols - - list_length(colinfo->usingNames); - colinfo->num_new_cols = nnewcolumns; - colinfo->new_colnames = (char **) palloc0(nnewcolumns * sizeof(char *)); - colinfo->is_new_col = (bool *) palloc0(nnewcolumns * sizeof(bool)); - - /* - * Generating the new_colnames array is a bit tricky since any new columns - * added since parse time must be inserted in the right places. This code - * must match the parser, which will order a join's columns as merged - * columns first (in USING-clause order), then non-merged columns from the - * left input (in attnum order), then non-merged columns from the right - * input (ditto). If one of the inputs is itself a join, its columns will - * be ordered according to the same rule, which means newly-added columns - * might not be at the end. We can figure out what's what by consulting - * the leftattnos and rightattnos arrays plus the input is_new_col arrays. - * - * In these loops, i indexes leftattnos/rightattnos (so it's join varattno - * less one), j indexes new_colnames/is_new_col, and ic/jc have similar - * meanings for the current child RTE. - */ - - /* Handle merged columns; they are first and can't be new */ - i = j = 0; - while (i < noldcolumns && - colinfo->leftattnos[i] != 0 && - colinfo->rightattnos[i] != 0) - { - /* column name is already determined and known unique */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - colinfo->is_new_col[j] = false; - - /* build bitmapsets of child attnums of merged columns */ - if (colinfo->leftattnos[i] > 0) - leftmerged = bms_add_member(leftmerged, colinfo->leftattnos[i]); - if (colinfo->rightattnos[i] > 0) - rightmerged = bms_add_member(rightmerged, colinfo->rightattnos[i]); - - i++, j++; - } - - /* Handle non-merged left-child columns */ - ic = 0; - for (jc = 0; jc < leftcolinfo->num_new_cols; jc++) - { - char *child_colname = leftcolinfo->new_colnames[jc]; - - if (!leftcolinfo->is_new_col[jc]) - { - /* Advance ic to next non-dropped old column of left child */ - while (ic < leftcolinfo->num_cols && - leftcolinfo->colnames[ic] == NULL) - ic++; - Assert(ic < leftcolinfo->num_cols); - ic++; - /* If it is a merged column, we already processed it */ - if (bms_is_member(ic, leftmerged)) - continue; - /* Else, advance i to the corresponding existing join column */ - while (i < colinfo->num_cols && - colinfo->colnames[i] == NULL) - i++; - Assert(i < colinfo->num_cols); - Assert(ic == colinfo->leftattnos[i]); - /* Use the already-assigned name of this column */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - i++; - } - else - { - /* - * Unique-ify the new child column name and assign, unless we're - * in an unnamed join, in which case just copy - */ - if (rte->alias != NULL) - { - colinfo->new_colnames[j] = - make_colname_unique(child_colname, dpns, colinfo); - if (!changed_any && - strcmp(colinfo->new_colnames[j], child_colname) != 0) - changed_any = true; - } - else - colinfo->new_colnames[j] = child_colname; - } - - colinfo->is_new_col[j] = leftcolinfo->is_new_col[jc]; - j++; - } - - /* Handle non-merged right-child columns in exactly the same way */ - ic = 0; - for (jc = 0; jc < rightcolinfo->num_new_cols; jc++) - { - char *child_colname = rightcolinfo->new_colnames[jc]; - - if (!rightcolinfo->is_new_col[jc]) - { - /* Advance ic to next non-dropped old column of right child */ - while (ic < rightcolinfo->num_cols && - rightcolinfo->colnames[ic] == NULL) - ic++; - Assert(ic < rightcolinfo->num_cols); - ic++; - /* If it is a merged 
column, we already processed it */ - if (bms_is_member(ic, rightmerged)) - continue; - /* Else, advance i to the corresponding existing join column */ - while (i < colinfo->num_cols && - colinfo->colnames[i] == NULL) - i++; - Assert(i < colinfo->num_cols); - Assert(ic == colinfo->rightattnos[i]); - /* Use the already-assigned name of this column */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - i++; - } - else - { - /* - * Unique-ify the new child column name and assign, unless we're - * in an unnamed join, in which case just copy - */ - if (rte->alias != NULL) - { - colinfo->new_colnames[j] = - make_colname_unique(child_colname, dpns, colinfo); - if (!changed_any && - strcmp(colinfo->new_colnames[j], child_colname) != 0) - changed_any = true; - } - else - colinfo->new_colnames[j] = child_colname; - } - - colinfo->is_new_col[j] = rightcolinfo->is_new_col[jc]; - j++; - } - - /* Assert we processed the right number of columns */ -#ifdef USE_ASSERT_CHECKING - for (int col_index = 0; col_index < colinfo->num_cols; col_index++) - { - /* - * In the above processing-loops, "i" advances only if - * the column is not new, check if this is a new column. - */ - if (colinfo->is_new_col[col_index]) - i++; - } - Assert(j == nnewcolumns); -#endif - - /* - * For a named join, print column aliases if we changed any from the child - * names. Unnamed joins cannot print aliases. - */ - if (rte->alias != NULL) - colinfo->printaliases = changed_any; - else - colinfo->printaliases = false; -} - -/* - * colname_is_unique: is colname distinct from already-chosen column names? - * - * dpns is query-wide info, colinfo is for the column's RTE - */ -static bool -colname_is_unique(const char *colname, deparse_namespace *dpns, - deparse_columns *colinfo) -{ - int i; - ListCell *lc; - - /* Check against already-assigned column aliases within RTE */ - for (i = 0; i < colinfo->num_cols; i++) - { - char *oldname = colinfo->colnames[i]; - - if (oldname && strcmp(oldname, colname) == 0) - return false; - } - - /* - * If we're building a new_colnames array, check that too (this will be - * partially but not completely redundant with the previous checks) - */ - for (i = 0; i < colinfo->num_new_cols; i++) - { - char *oldname = colinfo->new_colnames[i]; - - if (oldname && strcmp(oldname, colname) == 0) - return false; - } - - /* Also check against USING-column names that must be globally unique */ - foreach(lc, dpns->using_names) - { - char *oldname = (char *) lfirst(lc); - - if (strcmp(oldname, colname) == 0) - return false; - } - - /* Also check against names already assigned for parent-join USING cols */ - foreach(lc, colinfo->parentUsing) - { - char *oldname = (char *) lfirst(lc); - - if (strcmp(oldname, colname) == 0) - return false; - } - - return true; -} - -/* - * make_colname_unique: modify colname if necessary to make it unique - * - * dpns is query-wide info, colinfo is for the column's RTE - */ -static char * -make_colname_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo) -{ - /* - * If the selected name isn't unique, append digits to make it so. For a - * very long input name, we might have to truncate to stay within - * NAMEDATALEN. 
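 *
 * Example (illustrative, assuming the default NAMEDATALEN of 64): a
 * colliding 63-byte name cannot simply gain a "_1" suffix, since the
 * result would exceed the 63 bytes an identifier may occupy; the loop
 * below therefore clips the base name with pg_mbcliplen() until the
 * base plus "_<n>" fits again.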
- */ - if (!colname_is_unique(colname, dpns, colinfo)) - { - int colnamelen = strlen(colname); - char *modname = (char *) palloc(colnamelen + 16); - int i = 0; - - do - { - i++; - for (;;) - { - memcpy(modname, colname, colnamelen); - sprintf(modname + colnamelen, "_%d", i); - if (strlen(modname) < NAMEDATALEN) - break; - /* drop chars from colname to keep all the digits */ - colnamelen = pg_mbcliplen(colname, colnamelen, - colnamelen - 1); - } - } while (!colname_is_unique(modname, dpns, colinfo)); - colname = modname; - } - return colname; -} - -/* - * expand_colnames_array_to: make colinfo->colnames at least n items long - * - * Any added array entries are initialized to zero. - */ -static void -expand_colnames_array_to(deparse_columns *colinfo, int n) -{ - if (n > colinfo->num_cols) - { - if (colinfo->colnames == NULL) - colinfo->colnames = (char **) palloc0(n * sizeof(char *)); - else - { - colinfo->colnames = (char **) repalloc(colinfo->colnames, - n * sizeof(char *)); - memset(colinfo->colnames + colinfo->num_cols, 0, - (n - colinfo->num_cols) * sizeof(char *)); - } - colinfo->num_cols = n; - } -} - -/* - * identify_join_columns: figure out where columns of a join come from - * - * Fills the join-specific fields of the colinfo struct, except for - * usingNames which is filled later. - */ -static void -identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, - deparse_columns *colinfo) -{ - int numjoincols; - int jcolno; - int rcolno; - ListCell *lc; - - /* Extract left/right child RT indexes */ - if (IsA(j->larg, RangeTblRef)) - colinfo->leftrti = ((RangeTblRef *) j->larg)->rtindex; - else if (IsA(j->larg, JoinExpr)) - colinfo->leftrti = ((JoinExpr *) j->larg)->rtindex; - else - elog(ERROR, "unrecognized node type in jointree: %d", - (int) nodeTag(j->larg)); - if (IsA(j->rarg, RangeTblRef)) - colinfo->rightrti = ((RangeTblRef *) j->rarg)->rtindex; - else if (IsA(j->rarg, JoinExpr)) - colinfo->rightrti = ((JoinExpr *) j->rarg)->rtindex; - else - elog(ERROR, "unrecognized node type in jointree: %d", - (int) nodeTag(j->rarg)); - - /* Assert children will be processed earlier than join in second pass */ - Assert(colinfo->leftrti < j->rtindex); - Assert(colinfo->rightrti < j->rtindex); - - /* Initialize result arrays with zeroes */ - numjoincols = list_length(jrte->joinaliasvars); - Assert(numjoincols == list_length(jrte->eref->colnames)); - colinfo->leftattnos = (int *) palloc0(numjoincols * sizeof(int)); - colinfo->rightattnos = (int *) palloc0(numjoincols * sizeof(int)); - - /* - * Deconstruct RTE's joinleftcols/joinrightcols into desired format. - * Recall that the column(s) merged due to USING are the first column(s) - * of the join output. We need not do anything special while scanning - * joinleftcols, but while scanning joinrightcols we must distinguish - * merged from unmerged columns. - */ - jcolno = 0; - foreach(lc, jrte->joinleftcols) - { - int leftattno = lfirst_int(lc); - - colinfo->leftattnos[jcolno++] = leftattno; - } - rcolno = 0; - foreach(lc, jrte->joinrightcols) - { - int rightattno = lfirst_int(lc); - - if (rcolno < jrte->joinmergedcols) /* merged column? */ - colinfo->rightattnos[rcolno] = rightattno; - else - colinfo->rightattnos[jcolno++] = rightattno; - rcolno++; - } - Assert(jcolno == numjoincols); -} - -/* - * get_rtable_name: convenience function to get a previously assigned RTE alias - * - * The RTE must belong to the topmost namespace level in "context". 
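 *
 * For example (illustrative), the FOR [KEY] UPDATE/SHARE deparsing
 * further down calls get_rtable_name(rc->rti, context) to recover the
 * alias that set_rtable_names() chose for the locked relation.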
- */ -static char * -get_rtable_name(int rtindex, deparse_context *context) -{ - deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); - - Assert(rtindex > 0 && rtindex <= list_length(dpns->rtable_names)); - return (char *) list_nth(dpns->rtable_names, rtindex - 1); -} - -/* - * set_deparse_plan: set up deparse_namespace to parse subexpressions - * of a given Plan node - * - * This sets the plan, outer_planstate, inner_planstate, outer_tlist, - * inner_tlist, and index_tlist fields. Caller is responsible for adjusting - * the ancestors list if necessary. Note that the rtable and ctes fields do - * not need to change when shifting attention to different plan nodes in a - * single plan tree. - */ -static void -set_deparse_plan(deparse_namespace *dpns, Plan *plan) -{ - dpns->plan = plan; - - /* - * We special-case Append and MergeAppend to pretend that the first child - * plan is the OUTER referent; we have to interpret OUTER Vars in their - * tlists according to one of the children, and the first one is the most - * natural choice. - */ - if (IsA(plan, Append)) - dpns->outer_plan = linitial(((Append *) plan)->appendplans); - else if (IsA(plan, MergeAppend)) - dpns->outer_plan = linitial(((MergeAppend *) plan)->mergeplans); - else - dpns->outer_plan = outerPlan(plan); - - if (dpns->outer_plan) - dpns->outer_tlist = dpns->outer_plan->targetlist; - else - dpns->outer_tlist = NIL; - - /* - * For a SubqueryScan, pretend the subplan is INNER referent. (We don't - * use OUTER because that could someday conflict with the normal meaning.) - * Likewise, for a CteScan, pretend the subquery's plan is INNER referent. - * For a WorkTableScan, locate the parent RecursiveUnion plan node and use - * that as INNER referent. - * - * For MERGE, make the inner tlist point to the merge source tlist, which - * is same as the targetlist that the ModifyTable's source plan provides. - * For ON CONFLICT .. UPDATE we just need the inner tlist to point to the - * excluded expression's tlist. (Similar to the SubqueryScan we don't want - * to reuse OUTER, it's used for RETURNING in some modify table cases, - * although not INSERT .. CONFLICT). - */ - if (IsA(plan, SubqueryScan)) - dpns->inner_plan = ((SubqueryScan *) plan)->subplan; - else if (IsA(plan, CteScan)) - dpns->inner_plan = list_nth(dpns->subplans, - ((CteScan *) plan)->ctePlanId - 1); - else if (IsA(plan, WorkTableScan)) - dpns->inner_plan = find_recursive_union(dpns, - (WorkTableScan *) plan); - else if (IsA(plan, ModifyTable)) - dpns->inner_plan = plan; - else - dpns->inner_plan = innerPlan(plan); - - if (IsA(plan, ModifyTable)) - { - if (((ModifyTable *) plan)->operation == CMD_MERGE) - dpns->inner_tlist = dpns->outer_tlist; - else - dpns->inner_tlist = ((ModifyTable *) plan)->exclRelTlist; - } - else if (dpns->inner_plan) - dpns->inner_tlist = dpns->inner_plan->targetlist; - else - dpns->inner_tlist = NIL; - - /* Set up referent for INDEX_VAR Vars, if needed */ - if (IsA(plan, IndexOnlyScan)) - dpns->index_tlist = ((IndexOnlyScan *) plan)->indextlist; - else if (IsA(plan, ForeignScan)) - dpns->index_tlist = ((ForeignScan *) plan)->fdw_scan_tlist; - else if (IsA(plan, CustomScan)) - dpns->index_tlist = ((CustomScan *) plan)->custom_scan_tlist; - else - dpns->index_tlist = NIL; -} - -/* - * Locate the ancestor plan node that is the RecursiveUnion generating - * the WorkTableScan's work table. We can match on wtParam, since that - * should be unique within the plan tree. 
- */ -static Plan * -find_recursive_union(deparse_namespace *dpns, WorkTableScan *wtscan) -{ - ListCell *lc; - - foreach(lc, dpns->ancestors) - { - Plan *ancestor = (Plan *) lfirst(lc); - - if (IsA(ancestor, RecursiveUnion) && - ((RecursiveUnion *) ancestor)->wtParam == wtscan->wtParam) - return ancestor; - } - elog(ERROR, "could not find RecursiveUnion for WorkTableScan with wtParam %d", - wtscan->wtParam); - return NULL; -} - -/* - * push_child_plan: temporarily transfer deparsing attention to a child plan - * - * When expanding an OUTER_VAR or INNER_VAR reference, we must adjust the - * deparse context in case the referenced expression itself uses - * OUTER_VAR/INNER_VAR. We modify the top stack entry in-place to avoid - * affecting levelsup issues (although in a Plan tree there really shouldn't - * be any). - * - * Caller must provide a local deparse_namespace variable to save the - * previous state for pop_child_plan. - */ -static void -push_child_plan(deparse_namespace *dpns, Plan *plan, - deparse_namespace *save_dpns) -{ - /* Save state for restoration later */ - *save_dpns = *dpns; - - /* Link current plan node into ancestors list */ - dpns->ancestors = lcons(dpns->plan, dpns->ancestors); - - /* Set attention on selected child */ - set_deparse_plan(dpns, plan); -} - -/* - * pop_child_plan: undo the effects of push_child_plan - */ -static void -pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) -{ - List *ancestors; - - /* Get rid of ancestors list cell added by push_child_plan */ - ancestors = list_delete_first(dpns->ancestors); - - /* Restore fields changed by push_child_plan */ - *dpns = *save_dpns; - - /* Make sure dpns->ancestors is right (may be unnecessary) */ - dpns->ancestors = ancestors; -} - -/* - * push_ancestor_plan: temporarily transfer deparsing attention to an - * ancestor plan - * - * When expanding a Param reference, we must adjust the deparse context - * to match the plan node that contains the expression being printed; - * otherwise we'd fail if that expression itself contains a Param or - * OUTER_VAR/INNER_VAR/INDEX_VAR variable. - * - * The target ancestor is conveniently identified by the ListCell holding it - * in dpns->ancestors. - * - * Caller must provide a local deparse_namespace variable to save the - * previous state for pop_ancestor_plan. - */ -static void -push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, - deparse_namespace *save_dpns) -{ - Plan *plan = (Plan *) lfirst(ancestor_cell); - - /* Save state for restoration later */ - *save_dpns = *dpns; - - /* Build a new ancestor list with just this node's ancestors */ - dpns->ancestors = - list_copy_tail(dpns->ancestors, - list_cell_number(dpns->ancestors, ancestor_cell) + 1); - - /* Set attention on selected ancestor */ - set_deparse_plan(dpns, plan); -} - -/* - * pop_ancestor_plan: undo the effects of push_ancestor_plan - */ -static void -pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) -{ - /* Free the ancestor list made in push_ancestor_plan */ - list_free(dpns->ancestors); - - /* Restore fields changed by push_ancestor_plan */ - *dpns = *save_dpns; -} - -/* ---------- - * deparse_shard_query - Parse back a query for execution on a shard - * - * Builds an SQL string to perform the provided query on a specific shard and - * places this string into the provided buffer. 
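 *
 * Hypothetical call (sketch only; the shard id below is made up):
 *
 *     StringInfo buffer = makeStringInfo();
 *
 *     deparse_shard_query(query, distributedTableId, 102008, buffer);
 *
 * afterwards buffer->data holds the query text with references to the
 * distributed table rewritten to the corresponding shard.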
- * ---------- - */ -void -deparse_shard_query(Query *query, Oid distrelid, int64 shardid, - StringInfo buffer) -{ - get_query_def_extended(query, buffer, NIL, distrelid, shardid, NULL, - false, - 0, WRAP_COLUMN_DEFAULT, 0); -} - -/* ---------- - * get_query_def - Parse back one query parsetree - * - * query: parsetree to be displayed - * buf: output text is appended to buf - * parentnamespace: list (initially empty) of outer-level deparse_namespace's - * resultDesc: if not NULL, the output tuple descriptor for the view - * represented by a SELECT query. We use the column names from it - * to label SELECT output columns, in preference to names in the query - * colNamesVisible: true if the surrounding context cares about the output - * column names at all (as, for example, an EXISTS() context does not); - * when false, we can suppress dummy column labels such as "?column?" - * prettyFlags: bitmask of PRETTYFLAG_XXX options - * wrapColumn: maximum line length, or -1 to disable wrapping - * startIndent: initial indentation amount - * ---------- - */ -static void -get_query_def(Query *query, StringInfo buf, List *parentnamespace, - TupleDesc resultDesc, bool colNamesVisible, - int prettyFlags, int wrapColumn, int startIndent) -{ - get_query_def_extended(query, buf, parentnamespace, InvalidOid, 0, resultDesc, - colNamesVisible, - prettyFlags, wrapColumn, startIndent); -} - -/* ---------- - * get_query_def_extended - Parse back one query parsetree, optionally - * with extension using a shard identifier. - * - * If distrelid is valid and shardid is positive, the provided shardid is added - * any time the provided relid is deparsed, so that the query may be executed - * on a placement for the given shard. - * ---------- - */ -static void -get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace, - Oid distrelid, int64 shardid, TupleDesc resultDesc, - bool colNamesVisible, - int prettyFlags, int wrapColumn, int startIndent) -{ - deparse_context context; - deparse_namespace dpns; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - /* - * Before we begin to examine the query, acquire locks on referenced - * relations, and fix up deleted columns in JOIN RTEs. This ensures - * consistent results. Note we assume it's OK to scribble on the passed - * querytree! - * - * We are only deparsing the query (we are not about to execute it), so we - * only need AccessShareLock on the relations it mentions. - */ - AcquireRewriteLocks(query, false, false); - - /* - * Set search_path to NIL so that all objects outside of pg_catalog will be - * schema-prefixed. pg_catalog will be added automatically when we call - * PushEmptySearchPath(). 
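 *
 * The practical effect (illustrative): with the search path emptied, a
 * reference to table "orders" in schema "public" deparses as
 * "public.orders" rather than bare "orders", so the generated text does
 * not depend on the search_path of whichever backend later executes it.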
- */ - int saveNestLevel = PushEmptySearchPath(); - - context.buf = buf; - context.namespaces = lcons(&dpns, list_copy(parentnamespace)); - context.resultDesc = NULL; - context.targetList = NIL; - context.windowClause = NIL; - context.varprefix = (parentnamespace != NIL || - list_length(query->rtable) != 1); - context.prettyFlags = prettyFlags; - context.wrapColumn = wrapColumn; - context.indentLevel = startIndent; - context.colNamesVisible = true; - context.inGroupBy = false; - context.varInOrderBy = false; - context.appendparents = NULL; - context.distrelid = distrelid; - context.shardid = shardid; - - set_deparse_for_query(&dpns, query, parentnamespace); - - switch (query->commandType) - { - case CMD_SELECT: - /* We set context.resultDesc only if it's a SELECT */ - context.resultDesc = resultDesc; - get_select_query_def(query, &context); - break; - - case CMD_UPDATE: - get_update_query_def(query, &context); - break; - - case CMD_INSERT: - get_insert_query_def(query, &context); - break; - - case CMD_DELETE: - get_delete_query_def(query, &context); - break; - - case CMD_MERGE: - get_merge_query_def(query, &context); - break; - - case CMD_NOTHING: - appendStringInfoString(buf, "NOTHING"); - break; - - case CMD_UTILITY: - get_utility_query_def(query, &context); - break; - - default: - elog(ERROR, "unrecognized query command type: %d", - query->commandType); - break; - } - - /* revert back to original search_path */ - PopEmptySearchPath(saveNestLevel); -} - -/* ---------- - * get_values_def - Parse back a VALUES list - * ---------- - */ -static void -get_values_def(List *values_lists, deparse_context *context) -{ - StringInfo buf = context->buf; - bool first_list = true; - ListCell *vtl; - - appendStringInfoString(buf, "VALUES "); - - foreach(vtl, values_lists) - { - List *sublist = (List *) lfirst(vtl); - bool first_col = true; - ListCell *lc; - - if (first_list) - first_list = false; - else - appendStringInfoString(buf, ", "); - - appendStringInfoChar(buf, '('); - foreach(lc, sublist) - { - Node *col = (Node *) lfirst(lc); - - if (first_col) - first_col = false; - else - appendStringInfoChar(buf, ','); - - /* - * Print the value. Whole-row Vars need special treatment. 
- */ - get_rule_expr_toplevel(col, context, false); - } - appendStringInfoChar(buf, ')'); - } -} - -/* ---------- - * get_with_clause - Parse back a WITH clause - * ---------- - */ -static void -get_with_clause(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - if (query->cteList == NIL) - return; - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - - if (query->hasRecursive) - sep = "WITH RECURSIVE "; - else - sep = "WITH "; - foreach(l, query->cteList) - { - CommonTableExpr *cte = (CommonTableExpr *) lfirst(l); - - appendStringInfoString(buf, sep); - appendStringInfoString(buf, quote_identifier(cte->ctename)); - if (cte->aliascolnames) - { - bool first = true; - ListCell *col; - - appendStringInfoChar(buf, '('); - foreach(col, cte->aliascolnames) - { - if (first) - first = false; - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, - quote_identifier(strVal(lfirst(col)))); - } - appendStringInfoChar(buf, ')'); - } - appendStringInfoString(buf, " AS "); - switch (cte->ctematerialized) - { - case CTEMaterializeDefault: - break; - case CTEMaterializeAlways: - appendStringInfoString(buf, "MATERIALIZED "); - break; - case CTEMaterializeNever: - appendStringInfoString(buf, "NOT MATERIALIZED "); - break; - } - appendStringInfoChar(buf, '('); - if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", 0, 0, 0); - get_query_def((Query *) cte->ctequery, buf, context->namespaces, NULL, - true, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", 0, 0, 0); - appendStringInfoChar(buf, ')'); - - if (cte->search_clause) - { - bool first = true; - ListCell *lc; - - appendStringInfo(buf, " SEARCH %s FIRST BY ", - cte->search_clause->search_breadth_first ? 
"BREADTH" : "DEPTH"); - - foreach(lc, cte->search_clause->search_col_list) - { - if (first) - first = false; - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, - quote_identifier(strVal(lfirst(lc)))); - } - - appendStringInfo(buf, " SET %s", quote_identifier(cte->search_clause->search_seq_column)); - } - - if (cte->cycle_clause) - { - bool first = true; - ListCell *lc; - - appendStringInfoString(buf, " CYCLE "); - - foreach(lc, cte->cycle_clause->cycle_col_list) - { - if (first) - first = false; - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, - quote_identifier(strVal(lfirst(lc)))); - } - - appendStringInfo(buf, " SET %s", quote_identifier(cte->cycle_clause->cycle_mark_column)); - - { - Const *cmv = castNode(Const, cte->cycle_clause->cycle_mark_value); - Const *cmd = castNode(Const, cte->cycle_clause->cycle_mark_default); - - if (!(cmv->consttype == BOOLOID && !cmv->constisnull && DatumGetBool(cmv->constvalue) == true && - cmd->consttype == BOOLOID && !cmd->constisnull && DatumGetBool(cmd->constvalue) == false)) - { - appendStringInfoString(buf, " TO "); - get_rule_expr(cte->cycle_clause->cycle_mark_value, context, false); - appendStringInfoString(buf, " DEFAULT "); - get_rule_expr(cte->cycle_clause->cycle_mark_default, context, false); - } - } - - appendStringInfo(buf, " USING %s", quote_identifier(cte->cycle_clause->cycle_path_column)); - } - - sep = ", "; - } - - if (PRETTY_INDENT(context)) - { - context->indentLevel -= PRETTYINDENT_STD; - appendContextKeyword(context, "", 0, 0, 0); - } - else - appendStringInfoChar(buf, ' '); -} - -/* ---------- - * get_select_query_def - Parse back a SELECT parsetree - * ---------- - */ -static void -get_select_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - bool force_colno; - ListCell *l; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* Subroutines may need to consult the SELECT targetlist and windowClause */ - context->targetList = query->targetList; - context->windowClause = query->windowClause; - - /* - * If the Query node has a setOperations tree, then it's the top level of - * a UNION/INTERSECT/EXCEPT query; only the WITH, ORDER BY and LIMIT - * fields are interesting in the top query itself. - */ - if (query->setOperations) - { - get_setop_query(query->setOperations, query, context); - /* ORDER BY clauses must be simple in this case */ - force_colno = true; - } - else - { - get_basic_select_query(query, context); - force_colno = false; - } - - /* Add the ORDER BY clause if given */ - if (query->sortClause != NIL) - { - appendContextKeyword(context, " ORDER BY ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_orderby(query->sortClause, query->targetList, - force_colno, context); - } - - /* - * Add the LIMIT/OFFSET clauses if given. If non-default options, use the - * standard spelling of LIMIT. 
- */ - if (query->limitOffset != NULL) - { - appendContextKeyword(context, " OFFSET ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->limitOffset, context, false); - } - if (query->limitCount != NULL) - { - if (query->limitOption == LIMIT_OPTION_WITH_TIES) - { - // had to add '(' and ')' here because it fails with casting - appendContextKeyword(context, " FETCH FIRST (", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->limitCount, context, false); - appendStringInfoString(buf, ") ROWS WITH TIES"); - } - else - { - appendContextKeyword(context, " LIMIT ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - if (IsA(query->limitCount, Const) && - ((Const *) query->limitCount)->constisnull) - appendStringInfoString(buf, "ALL"); - else - get_rule_expr(query->limitCount, context, false); - } - } - - /* Add FOR [KEY] UPDATE/SHARE clauses if present */ - if (query->hasForUpdate) - { - foreach(l, query->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(l); - - /* don't print implicit clauses */ - if (rc->pushedDown) - continue; - - switch (rc->strength) - { - case LCS_NONE: - /* we intentionally throw an error for LCS_NONE */ - elog(ERROR, "unrecognized LockClauseStrength %d", - (int) rc->strength); - break; - case LCS_FORKEYSHARE: - appendContextKeyword(context, " FOR KEY SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORSHARE: - appendContextKeyword(context, " FOR SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORNOKEYUPDATE: - appendContextKeyword(context, " FOR NO KEY UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORUPDATE: - appendContextKeyword(context, " FOR UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - } - - appendStringInfo(buf, " OF %s", - quote_identifier(get_rtable_name(rc->rti, - context))); - if (rc->waitPolicy == LockWaitError) - appendStringInfoString(buf, " NOWAIT"); - else if (rc->waitPolicy == LockWaitSkip) - appendStringInfoString(buf, " SKIP LOCKED"); - } - } -} - -/* - * Detect whether query looks like SELECT ... FROM VALUES(); - * if so, return the VALUES RTE. Otherwise return NULL. - */ -static RangeTblEntry * -get_simple_values_rte(Query *query, TupleDesc resultDesc) -{ - RangeTblEntry *result = NULL; - ListCell *lc; - int colno; - - /* - * We want to return true even if the Query also contains OLD or NEW rule - * RTEs. So the idea is to scan the rtable and see if there is only one - * inFromCl RTE that is a VALUES RTE. - */ - foreach(lc, query->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - - if (rte->rtekind == RTE_VALUES && rte->inFromCl) - { - if (result) - return NULL; /* multiple VALUES (probably not possible) */ - result = rte; - } - else if (rte->rtekind == RTE_RELATION && !rte->inFromCl) - continue; /* ignore rule entries */ - else - return NULL; /* something else -> not simple VALUES */ - } - - /* - * We don't need to check the targetlist in any great detail, because - * parser/analyze.c will never generate a "bare" VALUES RTE --- they only - * appear inside auto-generated sub-queries with very restricted - * structure. However, DefineView might have modified the tlist by - * injecting new column aliases; so compare tlist resnames against the - * RTE's names to detect that. 
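 *
 * Illustrative example: a view created as
 *
 *     CREATE VIEW v AS VALUES (1, 'x');
 *
 * is parsed into a SELECT over an auto-generated VALUES subquery, and
 * this check lets the deparser print it back as just the VALUES list;
 * if a view column has since been renamed, the check fails and the full
 * SELECT form is printed instead.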
- */ - if (result) - { - ListCell *lcn; - - if (list_length(query->targetList) != list_length(result->eref->colnames)) - return NULL; /* this probably cannot happen */ - colno = 0; - forboth(lc, query->targetList, lcn, result->eref->colnames) - { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - char *cname = strVal(lfirst(lcn)); - char *colname; - - if (tle->resjunk) - return NULL; /* this probably cannot happen */ - /* compute name that get_target_list would use for column */ - colno++; - if (resultDesc && colno <= resultDesc->natts) - colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname); - else - colname = tle->resname; - - /* does it match the VALUES RTE? */ - if (colname == NULL || strcmp(colname, cname) != 0) - return NULL; /* column name has been changed */ - } - } - - return result; -} - -static void -get_basic_select_query(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *values_rte; - char *sep; - ListCell *l; - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - - /* - * If the query looks like SELECT * FROM (VALUES ...), then print just the - * VALUES part. This reverses what transformValuesClause() did at parse - * time. - */ - values_rte = get_simple_values_rte(query, context->resultDesc); - if (values_rte) - { - get_values_def(values_rte->values_lists, context); - return; - } - - /* - * Build up the query string - first we say SELECT - */ - if (query->isReturn) - appendStringInfoString(buf, "RETURN"); - else - appendStringInfoString(buf, "SELECT"); - - /* Add the DISTINCT clause if given */ - if (query->distinctClause != NIL) - { - if (query->hasDistinctOn) - { - appendStringInfoString(buf, " DISTINCT ON ("); - sep = ""; - foreach(l, query->distinctClause) - { - SortGroupClause *srt = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(srt->tleSortGroupRef, query->targetList, - false, context); - sep = ", "; - } - appendStringInfoChar(buf, ')'); - } - else - appendStringInfoString(buf, " DISTINCT"); - } - - /* Then we tell what to select (the targetlist) */ - get_target_list(query->targetList, context); - - /* Add the FROM clause if needed */ - get_from_clause(query, " FROM ", context); - - /* Add the WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add the GROUP BY clause if given */ - if (query->groupClause != NULL || query->groupingSets != NULL) - { - bool save_ingroupby; - - appendContextKeyword(context, " GROUP BY ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - if (query->groupDistinct) - appendStringInfoString(buf, "DISTINCT "); - - save_ingroupby = context->inGroupBy; - context->inGroupBy = true; - - if (query->groupingSets == NIL) - { - sep = ""; - foreach(l, query->groupClause) - { - SortGroupClause *grp = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, query->targetList, - false, context); - sep = ", "; - } - } - else - { - sep = ""; - foreach(l, query->groupingSets) - { - GroupingSet *grp = lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_groupingset(grp, query->targetList, true, context); - sep = ", "; - } - } - - context->inGroupBy = save_ingroupby; - } - - /* Add the HAVING clause if given */ - if (query->havingQual != NULL) - { - 
appendContextKeyword(context, " HAVING ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->havingQual, context, false); - } - - /* Add the WINDOW clause if needed */ - if (query->windowClause != NIL) - get_rule_windowclause(query, context); -} - -/* ---------- - * get_target_list - Parse back a SELECT target list - * - * This is also used for RETURNING lists in INSERT/UPDATE/DELETE/MERGE. - * ---------- - */ -static void -get_target_list(List *targetList, deparse_context *context) -{ - StringInfo buf = context->buf; - StringInfoData targetbuf; - bool last_was_multiline = false; - char *sep; - int colno; - ListCell *l; - - /* we use targetbuf to hold each TLE's text temporarily */ - initStringInfo(&targetbuf); - - sep = " "; - colno = 0; - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - char *colname; - char *attname; - - if (tle->resjunk) - continue; /* ignore junk entries */ - - appendStringInfoString(buf, sep); - sep = ", "; - colno++; - - /* - * Put the new field text into targetbuf so we can decide after we've - * got it whether or not it needs to go on a new line. - */ - resetStringInfo(&targetbuf); - context->buf = &targetbuf; - - /* - * We special-case Var nodes rather than using get_rule_expr. This is - * needed because get_rule_expr will display a whole-row Var as - * "foo.*", which is the preferred notation in most contexts, but at - * the top level of a SELECT list it's not right (the parser will - * expand that notation into multiple columns, yielding behavior - * different from a whole-row Var). We need to call get_variable - * directly so that we can tell it to do the right thing, and so that - * we can get the attribute name which is the default AS label. - */ - if (tle->expr && (IsA(tle->expr, Var))) - { - attname = get_variable((Var *) tle->expr, 0, true, context); - } - else - { - get_rule_expr((Node *) tle->expr, context, true); - - /* - * When colNamesVisible is true, we should always show the - * assigned column name explicitly. Otherwise, show it only if - * it's not FigureColname's fallback. - */ - attname = context->colNamesVisible ? NULL : "?column?"; - } - - /* - * Figure out what the result column should be called. In the context - * of a view, use the view's tuple descriptor (so as to pick up the - * effects of any column RENAME that's been done on the view). - * Otherwise, just use what we can find in the TLE. - */ - if (context->resultDesc && colno <= context->resultDesc->natts) - colname = NameStr(TupleDescAttr(context->resultDesc, - colno - 1)->attname); - else - colname = tle->resname; - - /* Show AS unless the column's name is correct as-is */ - if (colname) /* resname could be NULL */ - { - if (attname == NULL || strcmp(attname, colname) != 0) - appendStringInfo(&targetbuf, " AS %s", quote_identifier(colname)); - } - - /* Restore context's output buffer */ - context->buf = buf; - - /* Consider line-wrapping if enabled */ - if (PRETTY_INDENT(context) && context->wrapColumn >= 0) - { - int leading_nl_pos; - - /* Does the new field start with a new line? 
*/ - if (targetbuf.len > 0 && targetbuf.data[0] == '\n') - leading_nl_pos = 0; - else - leading_nl_pos = -1; - - /* If so, we shouldn't add anything */ - if (leading_nl_pos >= 0) - { - /* instead, remove any trailing spaces currently in buf */ - removeStringInfoSpaces(buf); - } - else - { - char *trailing_nl; - - /* Locate the start of the current line in the output buffer */ - trailing_nl = strrchr(buf->data, '\n'); - if (trailing_nl == NULL) - trailing_nl = buf->data; - else - trailing_nl++; - - /* - * Add a newline, plus some indentation, if the new field is - * not the first and either the new field would cause an - * overflow or the last field used more than one line. - */ - if (colno > 1 && - ((strlen(trailing_nl) + targetbuf.len > context->wrapColumn) || - last_was_multiline)) - appendContextKeyword(context, "", -PRETTYINDENT_STD, - PRETTYINDENT_STD, PRETTYINDENT_VAR); - } - - /* Remember this field's multiline status for next iteration */ - last_was_multiline = - (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL); - } - - /* Add the new field */ - appendStringInfoString(buf, targetbuf.data); - } - - /* clean up */ - pfree(targetbuf.data); -} - -static void -get_setop_query(Node *setOp, Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - bool need_paren; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - if (IsA(setOp, RangeTblRef)) - { - RangeTblRef *rtr = (RangeTblRef *) setOp; - RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable); - Query *subquery = rte->subquery; - - Assert(subquery != NULL); - Assert(subquery->setOperations == NULL); - /* Need parens if WITH, ORDER BY, FOR UPDATE, or LIMIT; see gram.y */ - need_paren = (subquery->cteList || - subquery->sortClause || - subquery->rowMarks || - subquery->limitOffset || - subquery->limitCount); - if (need_paren) - appendStringInfoChar(buf, '('); - get_query_def(subquery, buf, context->namespaces, - context->resultDesc, context->colNamesVisible, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - if (need_paren) - appendStringInfoChar(buf, ')'); - } - else if (IsA(setOp, SetOperationStmt)) - { - SetOperationStmt *op = (SetOperationStmt *) setOp; - int subindent; - bool save_colnamesvisible; - - /* - * We force parens when nesting two SetOperationStmts, except when the - * lefthand input is another setop of the same kind. Syntactically, - * we could omit parens in rather more cases, but it seems best to use - * parens to flag cases where the setop operator changes. If we use - * parens, we also increase the indentation level for the child query. - * - * There are some cases in which parens are needed around a leaf query - * too, but those are more easily handled at the next level down (see - * code above). 
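- * For example, "(a UNION ALL b) INTERSECT c" keeps its parentheses,
- * while a left-nested chain "a UNION ALL b UNION ALL c" needs none.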
- */ - if (IsA(op->larg, SetOperationStmt)) - { - SetOperationStmt *lop = (SetOperationStmt *) op->larg; - - if (op->op == lop->op && op->all == lop->all) - need_paren = false; - else - need_paren = true; - } - else - need_paren = false; - - if (need_paren) - { - appendStringInfoChar(buf, '('); - subindent = PRETTYINDENT_STD; - appendContextKeyword(context, "", subindent, 0, 0); - } - else - subindent = 0; - - get_setop_query(op->larg, query, context); - - if (need_paren) - appendContextKeyword(context, ") ", -subindent, 0, 0); - else if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", -subindent, 0, 0); - else - appendStringInfoChar(buf, ' '); - - switch (op->op) - { - case SETOP_UNION: - appendStringInfoString(buf, "UNION "); - break; - case SETOP_INTERSECT: - appendStringInfoString(buf, "INTERSECT "); - break; - case SETOP_EXCEPT: - appendStringInfoString(buf, "EXCEPT "); - break; - default: - elog(ERROR, "unrecognized set op: %d", - (int) op->op); - } - if (op->all) - appendStringInfoString(buf, "ALL "); - - /* Always parenthesize if RHS is another setop */ - need_paren = IsA(op->rarg, SetOperationStmt); - - /* - * The indentation code here is deliberately a bit different from that - * for the lefthand input, because we want the line breaks in - * different places. - */ - if (need_paren) - { - appendStringInfoChar(buf, '('); - subindent = PRETTYINDENT_STD; - } - else - subindent = 0; - appendContextKeyword(context, "", subindent, 0, 0); - - /* - * The output column names of the RHS sub-select don't matter. - */ - save_colnamesvisible = context->colNamesVisible; - context->colNamesVisible = false; - get_setop_query(op->rarg, query, context); - context->colNamesVisible = save_colnamesvisible; - - if (PRETTY_INDENT(context)) - context->indentLevel -= subindent; - if (need_paren) - appendContextKeyword(context, ")", 0, 0, 0); - } - else - { - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(setOp)); - } -} - -/* - * Display a sort/group clause. - * - * Also returns the expression tree, so caller need not find it again. - */ -static Node * -get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno, - deparse_context *context) -{ - StringInfo buf = context->buf; - TargetEntry *tle; - Node *expr; - - tle = get_sortgroupref_tle(ref, tlist); - expr = (Node *) tle->expr; - - /* - * Use column-number form if requested by caller. Otherwise, if - * expression is a constant, force it to be dumped with an explicit cast - * as decoration --- this is because a simple integer constant is - * ambiguous (and will be misinterpreted by findTargetlistEntrySQL92()) if - * we dump it without any decoration. Similarly, if it's just a Var, - * there is risk of misinterpretation if the column name is reassigned in - * the SELECT list, so we may need to force table qualification. And, if - * it's anything more complex than a simple Var, then force extra parens - * around it, to ensure it can't be misinterpreted as a cube() or rollup() - * construct. 
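- * For example, a bare constant 1 in the sort list is printed as
- * "1::integer" so that it cannot be read back as an output-column
- * number.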
- */ - if (force_colno) - { - Assert(!tle->resjunk); - appendStringInfo(buf, "%d", tle->resno); - } - else if (!expr) - /* do nothing, probably can't happen */ ; - else if (IsA(expr, Const)) - get_const_expr((Const *) expr, context, 1); - else if (IsA(expr, Var)) - { - /* Tell get_variable to check for name conflict */ - bool save_varinorderby = context->varInOrderBy; - context->varInOrderBy = true; - (void) get_variable((Var *) expr, 0, false, context); - context->varInOrderBy = save_varinorderby; - } - else - { - /* - * We must force parens for function-like expressions even if - * PRETTY_PAREN is off, since those are the ones in danger of - * misparsing. For other expressions we need to force them only if - * PRETTY_PAREN is on, since otherwise the expression will output them - * itself. (We can't skip the parens.) - */ - bool need_paren = (PRETTY_PAREN(context) - || IsA(expr, FuncExpr) - || IsA(expr, Aggref) - || IsA(expr, WindowFunc)); - - if (need_paren) - appendStringInfoChar(context->buf, '('); - get_rule_expr(expr, context, true); - if (need_paren) - appendStringInfoChar(context->buf, ')'); - } - - return expr; -} - -/* - * Display a GroupingSet - */ -static void -get_rule_groupingset(GroupingSet *gset, List *targetlist, - bool omit_parens, deparse_context *context) -{ - ListCell *l; - StringInfo buf = context->buf; - bool omit_child_parens = true; - char *sep = ""; - - switch (gset->kind) - { - case GROUPING_SET_EMPTY: - appendStringInfoString(buf, "()"); - return; - - case GROUPING_SET_SIMPLE: - { - if (!omit_parens || list_length(gset->content) != 1) - appendStringInfoChar(buf, '('); - - foreach(l, gset->content) - { - Index ref = lfirst_int(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(ref, targetlist, - false, context); - sep = ", "; - } - - if (!omit_parens || list_length(gset->content) != 1) - appendStringInfoChar(buf, ')'); - } - return; - - case GROUPING_SET_ROLLUP: - appendStringInfoString(buf, "ROLLUP("); - break; - case GROUPING_SET_CUBE: - appendStringInfoString(buf, "CUBE("); - break; - case GROUPING_SET_SETS: - appendStringInfoString(buf, "GROUPING SETS ("); - omit_child_parens = false; - break; - } - - foreach(l, gset->content) - { - appendStringInfoString(buf, sep); - get_rule_groupingset(lfirst(l), targetlist, omit_child_parens, context); - sep = ", "; - } - - appendStringInfoChar(buf, ')'); -} - -/* - * Display an ORDER BY list. 
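- * Default decorations are omitted: ASC prints nothing and DESC adds
- * " DESC"; for a non-default operator we print " USING <op>" and always
- * spell out the NULLS ordering to avoid ambiguity.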
- */ -static void -get_rule_orderby(List *orderList, List *targetList, - bool force_colno, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - sep = ""; - foreach(l, orderList) - { - SortGroupClause *srt = (SortGroupClause *) lfirst(l); - Node *sortexpr; - Oid sortcoltype; - TypeCacheEntry *typentry; - - appendStringInfoString(buf, sep); - sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, - force_colno, context); - sortcoltype = exprType(sortexpr); - /* See whether operator is default < or > for datatype */ - typentry = lookup_type_cache(sortcoltype, - TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); - if (srt->sortop == typentry->lt_opr) - { - /* ASC is default, so emit nothing for it */ - if (srt->nulls_first) - appendStringInfoString(buf, " NULLS FIRST"); - } - else if (srt->sortop == typentry->gt_opr) - { - appendStringInfoString(buf, " DESC"); - /* DESC defaults to NULLS FIRST */ - if (!srt->nulls_first) - appendStringInfoString(buf, " NULLS LAST"); - } - else - { - appendStringInfo(buf, " USING %s", - generate_operator_name(srt->sortop, - sortcoltype, - sortcoltype)); - /* be specific to eliminate ambiguity */ - if (srt->nulls_first) - appendStringInfoString(buf, " NULLS FIRST"); - else - appendStringInfoString(buf, " NULLS LAST"); - } - sep = ", "; - } -} - -/* - * Display a WINDOW clause. - * - * Note that the windowClause list might contain only anonymous window - * specifications, in which case we should print nothing here. - */ -static void -get_rule_windowclause(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - sep = NULL; - foreach(l, query->windowClause) - { - WindowClause *wc = (WindowClause *) lfirst(l); - - if (wc->name == NULL) - continue; /* ignore anonymous windows */ - - if (sep == NULL) - appendContextKeyword(context, " WINDOW ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - else - appendStringInfoString(buf, sep); - - appendStringInfo(buf, "%s AS ", quote_identifier(wc->name)); - - get_rule_windowspec(wc, query->targetList, context); - - sep = ", "; - } -} - -/* - * Display a window definition - */ -static void -get_rule_windowspec(WindowClause *wc, List *targetList, - deparse_context *context) -{ - StringInfo buf = context->buf; - bool needspace = false; - const char *sep; - ListCell *l; - - appendStringInfoChar(buf, '('); - if (wc->refname) - { - appendStringInfoString(buf, quote_identifier(wc->refname)); - needspace = true; - } - /* partition clauses are always inherited, so only print if no refname */ - if (wc->partitionClause && !wc->refname) - { - if (needspace) - appendStringInfoChar(buf, ' '); - appendStringInfoString(buf, "PARTITION BY "); - sep = ""; - foreach(l, wc->partitionClause) - { - SortGroupClause *grp = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, - false, context); - sep = ", "; - } - needspace = true; - } - /* print ordering clause only if not inherited */ - if (wc->orderClause && !wc->copiedOrder) - { - if (needspace) - appendStringInfoChar(buf, ' '); - appendStringInfoString(buf, "ORDER BY "); - get_rule_orderby(wc->orderClause, targetList, false, context); - needspace = true; - } - /* framing clause is never inherited, so print unless it's default */ - if (wc->frameOptions & FRAMEOPTION_NONDEFAULT) - { - if (needspace) - appendStringInfoChar(buf, ' '); - if (wc->frameOptions & FRAMEOPTION_RANGE) - appendStringInfoString(buf, "RANGE "); - else 
if (wc->frameOptions & FRAMEOPTION_ROWS) - appendStringInfoString(buf, "ROWS "); - else if (wc->frameOptions & FRAMEOPTION_GROUPS) - appendStringInfoString(buf, "GROUPS "); - else - Assert(false); - if (wc->frameOptions & FRAMEOPTION_BETWEEN) - appendStringInfoString(buf, "BETWEEN "); - if (wc->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) - appendStringInfoString(buf, "UNBOUNDED PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_START_CURRENT_ROW) - appendStringInfoString(buf, "CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_START_OFFSET) - { - get_rule_expr(wc->startOffset, context, false); - if (wc->frameOptions & FRAMEOPTION_START_OFFSET_PRECEDING) - appendStringInfoString(buf, " PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_START_OFFSET_FOLLOWING) - appendStringInfoString(buf, " FOLLOWING "); - else - Assert(false); - } - else - Assert(false); - if (wc->frameOptions & FRAMEOPTION_BETWEEN) - { - appendStringInfoString(buf, "AND "); - if (wc->frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING) - appendStringInfoString(buf, "UNBOUNDED FOLLOWING "); - else if (wc->frameOptions & FRAMEOPTION_END_CURRENT_ROW) - appendStringInfoString(buf, "CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_END_OFFSET) - { - get_rule_expr(wc->endOffset, context, false); - if (wc->frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING) - appendStringInfoString(buf, " PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING) - appendStringInfoString(buf, " FOLLOWING "); - else - Assert(false); - } - else - Assert(false); - } - if (wc->frameOptions & FRAMEOPTION_EXCLUDE_CURRENT_ROW) - appendStringInfoString(buf, "EXCLUDE CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_GROUP) - appendStringInfoString(buf, "EXCLUDE GROUP "); - else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_TIES) - appendStringInfoString(buf, "EXCLUDE TIES "); - /* we will now have a trailing space; remove it */ - buf->len--; - } - appendStringInfoChar(buf, ')'); -} - -/* ---------- - * get_insert_query_def - Parse back an INSERT parsetree - * ---------- - */ -static void -get_insert_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *select_rte = NULL; - RangeTblEntry *values_rte = NULL; - RangeTblEntry *rte; - ListCell *l; - List *strippedexprs = NIL; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * If it's an INSERT ... SELECT or multi-row VALUES, there will be a - * single RTE for the SELECT or VALUES. Plain VALUES has neither. 
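- * For example, INSERT INTO t VALUES (1), (2) carries a single
- * RTE_VALUES entry, whereas single-row INSERT INTO t VALUES (1) keeps
- * its expressions directly in the targetlist.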
- */ - foreach(l, query->rtable) - { - rte = (RangeTblEntry *) lfirst(l); - - if (rte->rtekind == RTE_SUBQUERY) - { - if (select_rte) - elog(ERROR, "too many subquery RTEs in INSERT"); - select_rte = rte; - } - - if (rte->rtekind == RTE_VALUES) - { - if (values_rte) - elog(ERROR, "too many values RTEs in INSERT"); - values_rte = rte; - } - } - if (select_rte && values_rte) - elog(ERROR, "both subquery and values RTEs in INSERT"); - - /* - * Start the query with INSERT INTO relname - */ - rte = rt_fetch(query->resultRelation, query->rtable); - Assert(rte->rtekind == RTE_RELATION); - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - appendStringInfo(buf, "INSERT INTO %s ", - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - /* INSERT requires AS keyword for target alias */ - if (rte->alias != NULL) - appendStringInfo(buf, "AS %s ", - quote_identifier(get_rtable_name(query->resultRelation, context))); - - /* - * Add the insert-column-names list. Any indirection decoration needed on - * the column names can be inferred from the top targetlist. - */ - if (query->targetList) - { - strippedexprs = get_insert_column_names_list(query->targetList, - buf, context, rte); - } - - if (query->override) - { - if (query->override == OVERRIDING_SYSTEM_VALUE) - appendStringInfoString(buf, "OVERRIDING SYSTEM VALUE "); - else if (query->override == OVERRIDING_USER_VALUE) - appendStringInfoString(buf, "OVERRIDING USER VALUE "); - } - - if (select_rte) - { - /* Add the SELECT */ - get_query_def(select_rte->subquery, buf, context->namespaces, NULL, - false, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - } - else if (values_rte) - { - /* Add the multi-VALUES expression lists */ - get_values_def(values_rte->values_lists, context); - } - else if (strippedexprs) - { - /* Add the single-VALUES expression list */ - appendContextKeyword(context, "VALUES (", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); - get_rule_list_toplevel(strippedexprs, context, false); - appendStringInfoChar(buf, ')'); - } - else - { - /* No expressions, so it must be DEFAULT VALUES */ - appendStringInfoString(buf, "DEFAULT VALUES"); - } - - /* Add ON CONFLICT if present */ - if (query->onConflict) - { - OnConflictExpr *confl = query->onConflict; - - appendStringInfoString(buf, " ON CONFLICT"); - - if (confl->arbiterElems) - { - /* Add the single-VALUES expression list */ - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) confl->arbiterElems, context, false); - appendStringInfoChar(buf, ')'); - - /* Add a WHERE clause (for partial indexes) if given */ - if (confl->arbiterWhere != NULL) - { - bool save_varprefix; - - /* - * Force non-prefixing of Vars, since parser assumes that they - * belong to target relation. WHERE clause does not use - * InferenceElem, so this is separately required. 
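- * For example, an arbiter predicate deparses as
- * "ON CONFLICT (key) WHERE is_active DO ...", with "is_active" left
- * unqualified.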
- */
- save_varprefix = context->varprefix;
- context->varprefix = false;
-
- appendContextKeyword(context, " WHERE ",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_rule_expr(confl->arbiterWhere, context, false);
-
- context->varprefix = save_varprefix;
- }
- }
- else if (OidIsValid(confl->constraint))
- {
- char *constraint = get_constraint_name(confl->constraint);
- int64 shardId = context->shardid;
-
- /* check the lookup result before appending a shard id to it */
- if (!constraint)
- elog(ERROR, "cache lookup failed for constraint %u",
- confl->constraint);
-
- if (shardId > 0)
- {
- AppendShardIdToName(&constraint, shardId);
- }
-
- appendStringInfo(buf, " ON CONSTRAINT %s",
- quote_identifier(constraint));
- }
-
- if (confl->action == ONCONFLICT_NOTHING)
- {
- appendStringInfoString(buf, " DO NOTHING");
- }
- else
- {
- appendStringInfoString(buf, " DO UPDATE SET ");
- /* Deparse targetlist */
- get_update_query_targetlist_def(query, confl->onConflictSet,
- context, rte);
-
- /* Add a WHERE clause if given */
- if (confl->onConflictWhere != NULL)
- {
- appendContextKeyword(context, " WHERE ",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_rule_expr(confl->onConflictWhere, context, false);
- }
- }
- }
-
- /* Add RETURNING if present */
- if (query->returningList)
- {
- appendContextKeyword(context, " RETURNING",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_target_list(query->returningList, context);
- }
-}
-
-/* ----------
- * get_update_query_def - Parse back an UPDATE parsetree
- * ----------
- */
-static void
-get_update_query_def(Query *query, deparse_context *context)
-{
- StringInfo buf = context->buf;
- RangeTblEntry *rte;
-
- /* Insert the WITH clause if given */
- get_with_clause(query, context);
-
- /*
- * Start the query with UPDATE relname SET
- */
- rte = rt_fetch(query->resultRelation, query->rtable);
-
- if (PRETTY_INDENT(context))
- {
- appendStringInfoChar(buf, ' ');
- context->indentLevel += PRETTYINDENT_STD;
- }
-
- /* if it's a shard, do differently */
- if (GetRangeTblKind(rte) == CITUS_RTE_SHARD)
- {
- char *fragmentSchemaName = NULL;
- char *fragmentTableName = NULL;
-
- ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL);
-
- /* use schema and table name from the remote alias */
- appendStringInfo(buf, "UPDATE %s%s",
- only_marker(rte),
- generate_fragment_name(fragmentSchemaName, fragmentTableName));
-
- if (rte->eref != NULL)
- appendStringInfo(buf, " %s",
- quote_identifier(get_rtable_name(query->resultRelation, context)));
- }
- else
- {
- appendStringInfo(buf, "UPDATE %s%s",
- only_marker(rte),
- generate_relation_or_shard_name(rte->relid,
- context->distrelid,
- context->shardid, NIL));
-
- if (rte->alias != NULL)
- appendStringInfo(buf, " %s",
- quote_identifier(get_rtable_name(query->resultRelation, context)));
- }
-
- appendStringInfoString(buf, " SET ");
-
- /* Deparse targetlist */
- get_update_query_targetlist_def(query, query->targetList, context, rte);
-
- /* Add the FROM clause if needed */
- get_from_clause(query, " FROM ", context);
-
- /* Add a WHERE clause if given */
- if (query->jointree->quals != NULL)
- {
- appendContextKeyword(context, " WHERE ",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_rule_expr(query->jointree->quals, context, false);
- }
-
- /* Add RETURNING if present */
- if (query->returningList)
- {
- appendContextKeyword(context, " RETURNING",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_target_list(query->returningList, context);
- }
-}
-
-/* ----------
- * get_update_query_targetlist_def - Parse back an UPDATE targetlist
- * ----------
- */
-static void -get_update_query_targetlist_def(Query *query, List *targetList, - deparse_context *context, RangeTblEntry *rte) -{ - StringInfo buf = context->buf; - ListCell *l; - ListCell *next_ma_cell; - int remaining_ma_columns; - const char *sep; - SubLink *cur_ma_sublink; - List *ma_sublinks; - - targetList = ExpandMergedSubscriptingRefEntries(targetList); - - /* - * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks - * into a list. We expect them to appear, in ID order, in resjunk tlist - * entries. - */ - ma_sublinks = NIL; - if (query->hasSubLinks) /* else there can't be any */ - { - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - - if (tle->resjunk && IsA(tle->expr, SubLink)) - { - SubLink *sl = (SubLink *) tle->expr; - - if (sl->subLinkType == MULTIEXPR_SUBLINK) - { - ma_sublinks = lappend(ma_sublinks, sl); - Assert(sl->subLinkId == list_length(ma_sublinks)); - } - } - } - - ensure_update_targetlist_in_param_order(targetList); - } - next_ma_cell = list_head(ma_sublinks); - cur_ma_sublink = NULL; - remaining_ma_columns = 0; - - /* Add the comma separated list of 'attname = value' */ - sep = ""; - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - Node *expr; - - if (tle->resjunk) - continue; /* ignore junk entries */ - - /* Emit separator (OK whether we're in multiassignment or not) */ - appendStringInfoString(buf, sep); - sep = ", "; - - /* - * Check to see if we're starting a multiassignment group: if so, - * output a left paren. - */ - if (next_ma_cell != NULL && cur_ma_sublink == NULL) - { - /* - * We must dig down into the expr to see if it's a PARAM_MULTIEXPR - * Param. That could be buried under FieldStores and - * SubscriptingRefs and CoerceToDomains (cf processIndirection()), - * and underneath those there could be an implicit type coercion. - * Because we would ignore implicit type coercions anyway, we - * don't need to be as careful as processIndirection() is about - * descending past implicit CoerceToDomains. - */ - expr = (Node *) tle->expr; - while (expr) - { - if (IsA(expr, FieldStore)) - { - FieldStore *fstore = (FieldStore *) expr; - - expr = (Node *) linitial(fstore->newvals); - } - else if (IsA(expr, SubscriptingRef)) - { - SubscriptingRef *sbsref = (SubscriptingRef *) expr; - - if (sbsref->refassgnexpr == NULL) - break; - expr = (Node *) sbsref->refassgnexpr; - } - else if (IsA(expr, CoerceToDomain)) - { - CoerceToDomain *cdomain = (CoerceToDomain *) expr; - - if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) - break; - expr = (Node *) cdomain->arg; - } - else - break; - } - expr = strip_implicit_coercions(expr); - - if (expr && IsA(expr, Param) && - ((Param *) expr)->paramkind == PARAM_MULTIEXPR) - { - cur_ma_sublink = (SubLink *) lfirst(next_ma_cell); - next_ma_cell = lnext(ma_sublinks, next_ma_cell); - remaining_ma_columns = count_nonjunk_tlist_entries( - ((Query *) cur_ma_sublink->subselect)->targetList); - Assert(((Param *) expr)->paramid == - ((cur_ma_sublink->subLinkId << 16) | 1)); - appendStringInfoChar(buf, '('); - } - } - - /* - * Put out name of target column; look in the catalogs, not at - * tle->resname, since resname will fail to track RENAME. - */ - appendStringInfoString(buf, - quote_identifier(get_attname(rte->relid, - tle->resno, - false))); - - /* - * Print any indirection needed (subfields or subscripts), and strip - * off the top-level nodes representing the indirection assignments. 
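- * For example, "UPDATE t SET arr[2] = ..." and "SET rec.field = ..."
- * are reconstructed here from SubscriptingRef and FieldStore nodes.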
- */ - expr = processIndirection((Node *) tle->expr, context); - - /* - * If we're in a multiassignment, skip printing anything more, unless - * this is the last column; in which case, what we print should be the - * sublink, not the Param. - */ - if (cur_ma_sublink != NULL) - { - if (--remaining_ma_columns > 0) - continue; /* not the last column of multiassignment */ - appendStringInfoChar(buf, ')'); - expr = (Node *) cur_ma_sublink; - cur_ma_sublink = NULL; - } - - appendStringInfoString(buf, " = "); - - get_rule_expr(expr, context, false); - } -} - -/* ---------- - * get_delete_query_def - Parse back a DELETE parsetree - * ---------- - */ -static void -get_delete_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *rte; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * Start the query with DELETE FROM relname - */ - rte = rt_fetch(query->resultRelation, query->rtable); - - if (PRETTY_INDENT(context)) - { - appendStringInfoChar(buf, ' '); - context->indentLevel += PRETTYINDENT_STD; - } - - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "DELETE FROM %s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, fragmentTableName)); - - if(rte->eref != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - else - { - appendStringInfo(buf, "DELETE FROM %s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - - if (rte->alias != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - - /* Add the USING clause if given */ - get_from_clause(query, " USING ", context); - - /* Add a WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add RETURNING if present */ - if (query->returningList) - { - appendContextKeyword(context, " RETURNING", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_target_list(query->returningList, context); - } -} - -/* ---------- - * get_merge_query_def - Parse back a MERGE parsetree - * ---------- - */ -static void -get_merge_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * Start the query with MERGE INTO - */ - RangeTblEntry *targetRte = ExtractResultRelationRTE(query); - - if (PRETTY_INDENT(context)) - { - appendStringInfoChar(buf, ' '); - context->indentLevel += PRETTYINDENT_STD; - } - - /* if it's a shard, do differently */ - if (GetRangeTblKind(targetRte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(targetRte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "MERGE INTO %s%s", - only_marker(targetRte), - generate_fragment_name(fragmentSchemaName, fragmentTableName)); - - if(targetRte->eref != NULL) - appendStringInfo(buf, " %s", - 
quote_identifier(get_rtable_name(query->resultRelation, context))); - } - else - { - appendStringInfo(buf, "MERGE INTO %s%s", - only_marker(targetRte), - generate_relation_or_shard_name(targetRte->relid, - context->distrelid, - context->shardid, NIL)); - - if (targetRte->alias != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - - /* - * Add the MERGE source relation -- USING - */ - get_from_clause(query, " USING ", context); - - /* - * Add the MERGE ON condition - */ - Assert(query->jointree->quals != NULL); - { - appendContextKeyword(context, " ON ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - ListCell *actionCell = NULL; - foreach(actionCell, query->mergeActionList) - { - MergeAction *action = (MergeAction *) lfirst(actionCell); - - /* Add WHEN [NOT] MATCHED */ - appendContextKeyword(context, " WHEN", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - appendStringInfo(buf, " %s", action->matched ? "MATCHED" : "NOT MATCHED"); - - /* Add optional AND */ - if (action->qual) - { - appendContextKeyword(context, " AND ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(action->qual, context, false); - } - - appendContextKeyword(context, " THEN", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - - switch (action->commandType) - { - case CMD_INSERT: - { - appendStringInfo(buf, " INSERT " ); - List *strippedexprs = NIL; - - if (action->targetList) - { - strippedexprs = get_insert_column_names_list(action->targetList, - buf, context, targetRte); - } - - if (strippedexprs) - { - /* Add the single-VALUES expression list */ - appendContextKeyword(context, "VALUES (", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); - get_rule_list_toplevel(strippedexprs, context, false); - appendStringInfoChar(buf, ')'); - } - else - { - /* No expressions, so it must be DEFAULT VALUES */ - appendStringInfoString(buf, "DEFAULT VALUES"); - } - } - break; - - case CMD_UPDATE: - appendStringInfo(buf, " UPDATE SET " ); - get_update_query_targetlist_def(query, action->targetList, - context, targetRte); - break; - - case CMD_DELETE: - appendStringInfo(buf, " DELETE" ); - break; - - case CMD_NOTHING: - appendStringInfo(buf, " DO NOTHING " ); - break; - - default: - elog(ERROR, "unknown action in MERGE WHEN clause"); - } - } - - /* - * RETURNING is not supported in MERGE, so it must be NULL, but if PG adds it later, - * we might miss it, let's raise an exception to investigate. 
- */
- if (unlikely(query->returningList))
- {
- elog(ERROR, "unexpected RETURNING clause in MERGE");
- }
-
- /* log the deparsed text at DEBUG1 for troubleshooting */
- ereport(DEBUG1, (errmsg("deparsed MERGE query: %s", buf->data)));
-}
-
-
-/* ----------
- * get_utility_query_def - Parse back a UTILITY parsetree
- * ----------
- */
-static void
-get_utility_query_def(Query *query, deparse_context *context)
-{
- StringInfo buf = context->buf;
-
- if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt))
- {
- NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt;
-
- appendContextKeyword(context, "",
- 0, PRETTYINDENT_STD, 1);
- appendStringInfo(buf, "NOTIFY %s",
- quote_identifier(stmt->conditionname));
- if (stmt->payload)
- {
- appendStringInfoString(buf, ", ");
- simple_quote_literal(buf, stmt->payload);
- }
- }
- else if (query->utilityStmt && IsA(query->utilityStmt, TruncateStmt))
- {
- TruncateStmt *stmt = (TruncateStmt *) query->utilityStmt;
- List *relationList = stmt->relations;
- ListCell *relationCell = NULL;
-
- appendContextKeyword(context, "",
- 0, PRETTYINDENT_STD, 1);
-
- appendStringInfo(buf, "TRUNCATE TABLE");
-
- foreach(relationCell, relationList)
- {
- RangeVar *relationVar = (RangeVar *) lfirst(relationCell);
- Oid relationId = RangeVarGetRelid(relationVar, NoLock, false);
- char *relationName = generate_relation_or_shard_name(relationId,
- context->distrelid,
- context->shardid, NIL);
- appendStringInfo(buf, " %s", relationName);
-
- if (lnext(relationList, relationCell) != NULL)
- {
- appendStringInfo(buf, ",");
- }
- }
-
- if (stmt->restart_seqs)
- {
- appendStringInfo(buf, " RESTART IDENTITY");
- }
-
- if (stmt->behavior == DROP_CASCADE)
- {
- appendStringInfo(buf, " CASCADE");
- }
- }
- else
- {
- /* only NOTIFY and TRUNCATE utility statements are expected here */
- elog(ERROR, "unexpected utility statement type");
- }
-}
-
-/*
- * Display a Var appropriately.
- *
- * In some cases (currently only when recursing into an unnamed join)
- * the Var's varlevelsup has to be interpreted with respect to a context
- * above the current one; levelsup indicates the offset.
- *
- * If istoplevel is true, the Var is at the top level of a SELECT's
- * targetlist, which means we need special treatment of whole-row Vars.
- * Instead of the normal "tab.*", we'll print "tab.*::typename", which is a
- * dirty hack to prevent "tab.*" from being expanded into multiple columns.
- * (The parser will strip the useless coercion, so no inefficiency is added in
- * dump and reload.) We used to print just "tab" in such cases, but that is
- * ambiguous and will yield the wrong result if "tab" is also a plain column
- * name in the query.
- *
- * Returns the attname of the Var, or NULL if the Var has no attname (because
- * it is a whole-row Var or a subplan output reference).
- */ -static char * -get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *rte; - AttrNumber attnum; - int varno; - AttrNumber varattno; - int netlevelsup; - deparse_namespace *dpns; - deparse_columns *colinfo; - char *refname; - char *attname; - bool need_prefix; - - /* Find appropriate nesting depth */ - netlevelsup = var->varlevelsup + levelsup; - if (netlevelsup >= list_length(context->namespaces)) - elog(ERROR, "bogus varlevelsup: %d offset %d", - var->varlevelsup, levelsup); - dpns = (deparse_namespace *) list_nth(context->namespaces, - netlevelsup); - - varno = var->varno; - varattno = var->varattno; - - - if (var->varnosyn > 0 && var->varnosyn <= list_length(dpns->rtable) && dpns->plan == NULL) { - rte = rt_fetch(var->varnosyn, dpns->rtable); - - /* - * if the rte var->varnosyn points to is not a regular table and it is a join - * then the correct relname will be found with var->varnosyn and var->varattnosyn - */ - if (rte->rtekind == RTE_JOIN && rte->relid == 0 && var->varnosyn != var->varno) { - varno = var->varnosyn; - varattno = var->varattnosyn; - } - } - - /* - * Try to find the relevant RTE in this rtable. In a plan tree, it's - * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig - * down into the subplans, or INDEX_VAR, which is resolved similarly. Also - * find the aliases previously assigned for this RTE. - */ - if (varno >= 1 && varno <= list_length(dpns->rtable)) - { - - /* - * We might have been asked to map child Vars to some parent relation. - */ - if (context->appendparents && dpns->appendrels) - { - - int pvarno = varno; - AttrNumber pvarattno = varattno; - AppendRelInfo *appinfo = dpns->appendrels[pvarno]; - bool found = false; - - /* Only map up to inheritance parents, not UNION ALL appendrels */ - while (appinfo && - rt_fetch(appinfo->parent_relid, - dpns->rtable)->rtekind == RTE_RELATION) - { - found = false; - if (pvarattno > 0) /* system columns stay as-is */ - { - if (pvarattno > appinfo->num_child_cols) - break; /* safety check */ - pvarattno = appinfo->parent_colnos[pvarattno - 1]; - if (pvarattno == 0) - break; /* Var is local to child */ - } - - pvarno = appinfo->parent_relid; - found = true; - - /* If the parent is itself a child, continue up. */ - Assert(pvarno > 0 && pvarno <= list_length(dpns->rtable)); - appinfo = dpns->appendrels[pvarno]; - } - - /* - * If we found an ancestral rel, and that rel is included in - * appendparents, print that column not the original one. - */ - if (found && bms_is_member(pvarno, context->appendparents)) - { - varno = pvarno; - varattno = pvarattno; - } - } - - rte = rt_fetch(varno, dpns->rtable); - refname = (char *) list_nth(dpns->rtable_names, varno - 1); - colinfo = deparse_columns_fetch(varno, dpns); - attnum = varattno; - } - else - { - resolve_special_varno((Node *) var, context, get_special_variable, - NULL); - return NULL; - } - - /* - * The planner will sometimes emit Vars referencing resjunk elements of a - * subquery's target list (this is currently only possible if it chooses - * to generate a "physical tlist" for a SubqueryScan or CteScan node). - * Although we prefer to print subquery-referencing Vars using the - * subquery's alias, that's not possible for resjunk items since they have - * no alias. So in that case, drill down to the subplan and print the - * contents of the referenced tlist item. 
This works because in a plan - * tree, such Vars can only occur in a SubqueryScan or CteScan node, and - * we'll have set dpns->inner_plan to reference the child plan node. - */ - if ((rte->rtekind == RTE_SUBQUERY || rte->rtekind == RTE_CTE) && - attnum > list_length(rte->eref->colnames) && - dpns->inner_plan) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->inner_tlist, attnum); - if (!tle) - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_plan, &save_dpns); - - /* - * Force parentheses because our caller probably assumed a Var is a - * simple expression. - */ - if (!IsA(tle->expr, Var)) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) tle->expr, context, true); - if (!IsA(tle->expr, Var)) - appendStringInfoChar(buf, ')'); - - pop_child_plan(dpns, &save_dpns); - return NULL; - } - - /* - * If it's an unnamed join, look at the expansion of the alias variable. - * If it's a simple reference to one of the input vars, then recursively - * print the name of that var instead. When it's not a simple reference, - * we have to just print the unqualified join column name. (This can only - * happen with "dangerous" merged columns in a JOIN USING; we took pains - * previously to make the unqualified column name unique in such cases.) - * - * This wouldn't work in decompiling plan trees, because we don't store - * joinaliasvars lists after planning; but a plan tree should never - * contain a join alias variable. - */ - if (rte->rtekind == RTE_JOIN && rte->alias == NULL) - { - if (rte->joinaliasvars == NIL) - elog(ERROR, "cannot decompile join alias var in plan tree"); - if (attnum > 0) - { - Var *aliasvar; - - aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1); - /* we intentionally don't strip implicit coercions here */ - if (aliasvar && IsA(aliasvar, Var)) - { - return get_variable(aliasvar, var->varlevelsup + levelsup, - istoplevel, context); - } - } - - /* - * Unnamed join has no refname. (Note: since it's unnamed, there is - * no way the user could have referenced it to create a whole-row Var - * for it. So we don't have to cover that case below.) - */ - Assert(refname == NULL); - } - - if (attnum == InvalidAttrNumber) - attname = NULL; - else if (attnum > 0) - { - /* Get column name to use from the colinfo struct */ - if (attnum > colinfo->num_cols) - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); - attname = colinfo->colnames[attnum - 1]; - - /* - * If we find a Var referencing a dropped column, it seems better to - * print something (anything) than to fail. In general this should - * not happen, but it used to be possible for some cases involving - * functions returning named composite types, and perhaps there are - * still bugs out there. - */ - if (attname == NULL) - attname = "?dropped?column?"; - } - else if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - /* System column on a Citus shard */ - attname = get_attname(rte->relid, attnum, false); - } - else - { - /* System column - name is fixed, get it from the catalog */ - attname = get_rte_attribute_name(rte, attnum); - } - - need_prefix = (context->varprefix || attname == NULL); - /* - * If we're considering a plain Var in an ORDER BY (but not GROUP BY) - * clause, we may need to add a table-name prefix to prevent - * findTargetlistEntrySQL92 from misinterpreting the name as an - * output-column name. 
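- * For example, in "SELECT a AS b FROM t ORDER BY b" a bare "b" would be
- * taken as the output column, so a Var "b" must be printed as "t.b".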
To avoid cluttering the output with unnecessary - * prefixes, do so only if there is a name match to a SELECT tlist item - * that is different from the Var. - */ - if (context->varInOrderBy && !context->inGroupBy && !need_prefix) - { - int colno = 0; - ListCell *l; - foreach(l, context->targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - char *colname; - if (tle->resjunk) - continue; /* ignore junk entries */ - colno++; - /* This must match colname-choosing logic in get_target_list() */ - if (context->resultDesc && colno <= context->resultDesc->natts) - colname = NameStr(TupleDescAttr(context->resultDesc, - colno - 1)->attname); - else - colname = tle->resname; - if (colname && strcmp(colname, attname) == 0 && - !equal(var, tle->expr)) - { - need_prefix = true; - break; - } - } - } - - if (refname && need_prefix) - { - appendStringInfoString(buf, quote_identifier(refname)); - appendStringInfoChar(buf, '.'); - } - if (attname) - appendStringInfoString(buf, quote_identifier(attname)); - else - { - appendStringInfoChar(buf, '*'); - - if (istoplevel) - { - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - /* use rel.*::shard_name instead of rel.*::table_name */ - appendStringInfo(buf, "::%s", - generate_rte_shard_name(rte)); - } - else - { - appendStringInfo(buf, "::%s", - format_type_with_typemod(var->vartype, - var->vartypmod)); - } - } - } - - return attname; -} - -/* - * Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This - * routine is actually a callback for get_special_varno, which handles finding - * the correct TargetEntry. We get the expression contained in that - * TargetEntry and just need to deparse it, a job we can throw back on - * get_rule_expr. - */ -static void -get_special_variable(Node *node, deparse_context *context, void *callback_arg) -{ - StringInfo buf = context->buf; - - /* - * For a non-Var referent, force parentheses because our caller probably - * assumed a Var is a simple expression. - */ - if (!IsA(node, Var)) - appendStringInfoChar(buf, '('); - get_rule_expr(node, context, true); - if (!IsA(node, Var)) - appendStringInfoChar(buf, ')'); -} - -/* - * Chase through plan references to special varnos (OUTER_VAR, INNER_VAR, - * INDEX_VAR) until we find a real Var or some kind of non-Var node; then, - * invoke the callback provided. - */ -static void -resolve_special_varno(Node *node, deparse_context *context, rsv_callback callback, void *callback_arg) -{ - Var *var; - deparse_namespace *dpns; - - /* This function is recursive, so let's be paranoid. */ - check_stack_depth(); - - /* If it's not a Var, invoke the callback. */ - if (!IsA(node, Var)) - { - (*callback) (node, context, callback_arg); - return; - } - - /* Find appropriate nesting depth */ - var = (Var *) node; - dpns = (deparse_namespace *) list_nth(context->namespaces, - var->varlevelsup); - - /* - * It's a special RTE, so recurse. - */ - if (var->varno == OUTER_VAR && dpns->outer_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - Bitmapset *save_appendparents; - - tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); - - /* If we're descending to the first child of an Append or MergeAppend, - * update appendparents. This will affect deparsing of all Vars - * appearing within the eventually-resolved subexpression. 
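- * (This is what lets Vars of child partitions be printed with the
- * parent table's name when deparsing a plan over a partitioned table.)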
- */ - save_appendparents = context->appendparents; - - if (IsA(dpns->plan, Append)) - context->appendparents = bms_union(context->appendparents, - ((Append *) dpns->plan)->apprelids); - else if (IsA(dpns->plan, MergeAppend)) - context->appendparents = bms_union(context->appendparents, - ((MergeAppend *) dpns->plan)->apprelids); - - push_child_plan(dpns, dpns->outer_plan, &save_dpns); - resolve_special_varno((Node *) tle->expr, context, - callback, callback_arg); - pop_child_plan(dpns, &save_dpns); - context->appendparents = save_appendparents; - return; - } - else if (var->varno == INNER_VAR && dpns->inner_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); - - push_child_plan(dpns, dpns->inner_plan, &save_dpns); - resolve_special_varno((Node *) tle->expr, context, callback, callback_arg); - pop_child_plan(dpns, &save_dpns); - return; - } - else if (var->varno == INDEX_VAR && dpns->index_tlist) - { - TargetEntry *tle; - - tle = get_tle_by_resno(dpns->index_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); - - resolve_special_varno((Node *) tle->expr, context, callback, callback_arg); - return; - } - else if (var->varno < 1 || var->varno > list_length(dpns->rtable)) - elog(ERROR, "bogus varno: %d", var->varno); - - /* Not special. Just invoke the callback. */ - (*callback) (node, context, callback_arg); -} - -/* - * Get the name of a field of an expression of composite type. The - * expression is usually a Var, but we handle other cases too. - * - * levelsup is an extra offset to interpret the Var's varlevelsup correctly. - * - * This is fairly straightforward when the expression has a named composite - * type; we need only look up the type in the catalogs. However, the type - * could also be RECORD. Since no actual table or view column is allowed to - * have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE - * or to a subquery output. We drill down to find the ultimate defining - * expression and attempt to infer the field name from it. We ereport if we - * can't determine the name. - * - * Similarly, a PARAM of type RECORD has to refer to some expression of - * a determinable composite type. - */ -static const char * -get_name_for_var_field(Var *var, int fieldno, - int levelsup, deparse_context *context) -{ - RangeTblEntry *rte; - AttrNumber attnum; - int netlevelsup; - deparse_namespace *dpns; - int varno; - AttrNumber varattno; - TupleDesc tupleDesc; - Node *expr; - - /* - * If it's a RowExpr that was expanded from a whole-row Var, use the - * column names attached to it. - */ - if (IsA(var, RowExpr)) - { - RowExpr *r = (RowExpr *) var; - - if (fieldno > 0 && fieldno <= list_length(r->colnames)) - return strVal(list_nth(r->colnames, fieldno - 1)); - } - - /* - * If it's a Param of type RECORD, try to find what the Param refers to. 
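- * (The referent, if any, is an expression supplied by an ancestor
- * NestLoop or SubPlan node; see find_param_referent() below.)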
- */ - if (IsA(var, Param)) - { - Param *param = (Param *) var; - ListCell *ancestor_cell; - - expr = find_param_referent(param, context, &dpns, &ancestor_cell); - if (expr) - { - /* Found a match, so recurse to decipher the field name */ - deparse_namespace save_dpns; - const char *result; - - push_ancestor_plan(dpns, ancestor_cell, &save_dpns); - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - pop_ancestor_plan(dpns, &save_dpns); - return result; - } - } - - /* - * If it's a Var of type RECORD, we have to find what the Var refers to; - * if not, we can use get_expr_result_tupdesc(). - */ - if (!IsA(var, Var) || - var->vartype != RECORDOID) - { - tupleDesc = get_expr_result_tupdesc((Node *) var, false); - /* Got the tupdesc, so we can extract the field name */ - Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); - return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname); - } - - /* Find appropriate nesting depth */ - netlevelsup = var->varlevelsup + levelsup; - if (netlevelsup >= list_length(context->namespaces)) - elog(ERROR, "bogus varlevelsup: %d offset %d", - var->varlevelsup, levelsup); - dpns = (deparse_namespace *) list_nth(context->namespaces, - netlevelsup); - - varno = var->varno; - varattno = var->varattno; - - if (var->varnosyn > 0 && var->varnosyn <= list_length(dpns->rtable) && dpns->plan == NULL) { - rte = rt_fetch(var->varnosyn, dpns->rtable); - - /* - * if the rte var->varnosyn points to is not a regular table and it is a join - * then the correct relname will be found with var->varnosyn and var->varattnosyn - */ - if (rte->rtekind == RTE_JOIN && rte->relid == 0 && var->varnosyn != var->varno) { - varno = var->varnosyn; - varattno = var->varattnosyn; - } - } - - /* - * Try to find the relevant RTE in this rtable. In a plan tree, it's - * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig - * down into the subplans, or INDEX_VAR, which is resolved similarly. 
- */ - if (varno >= 1 && varno <= list_length(dpns->rtable)) - { - rte = rt_fetch(varno, dpns->rtable); - attnum = varattno; - } - else if (varno == OUTER_VAR && dpns->outer_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - tle = get_tle_by_resno(dpns->outer_tlist, varattno); - if (!tle) - elog(ERROR, "bogus varattno for OUTER_VAR var: %d", varattno); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->outer_plan, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - else if (varno == INNER_VAR && dpns->inner_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - tle = get_tle_by_resno(dpns->inner_tlist, varattno); - if (!tle) - elog(ERROR, "bogus varattno for INNER_VAR var: %d", varattno); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_plan, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - else if (varno == INDEX_VAR && dpns->index_tlist) - { - TargetEntry *tle; - const char *result; - - tle = get_tle_by_resno(dpns->index_tlist, varattno); - if (!tle) - elog(ERROR, "bogus varattno for INDEX_VAR var: %d", varattno); - - Assert(netlevelsup == 0); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - return result; - } - else - { - elog(ERROR, "bogus varno: %d", varno); - return NULL; /* keep compiler quiet */ - } - - if (attnum == InvalidAttrNumber) - { - /* Var is whole-row reference to RTE, so select the right field */ - return get_rte_attribute_name(rte, fieldno); - } - - /* - * This part has essentially the same logic as the parser's - * expandRecordVariable() function, but we are dealing with a different - * representation of the input context, and we only need one field name - * not a TupleDesc. Also, we need special cases for finding subquery and - * CTE subplans when deparsing Plan trees. - */ - expr = (Node *) var; /* default if we can't drill down */ - - switch (rte->rtekind) - { - case RTE_RELATION: - case RTE_VALUES: - case RTE_NAMEDTUPLESTORE: - case RTE_RESULT: - - /* - * This case should not occur: a column of a table or values list - * shouldn't have type RECORD. Fall through and fail (most - * likely) at the bottom. - */ - break; - case RTE_SUBQUERY: - /* Subselect-in-FROM: examine sub-select's output expr */ - { - if (rte->subquery) - { - TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList, - attnum); - - if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", - rte->eref->aliasname, attnum); - expr = (Node *) ste->expr; - if (IsA(expr, Var)) - { - /* - * Recurse into the sub-select to see what its Var - * refers to. We have to build an additional level of - * namespace to keep in step with varlevelsup in the - * subselect. - */ - deparse_namespace mydpns; - const char *result; - - set_deparse_for_query(&mydpns, rte->subquery, - context->namespaces); - - context->namespaces = lcons(&mydpns, - context->namespaces); - - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - - context->namespaces = - list_delete_first(context->namespaces); - - return result; - } - /* else fall through to inspect the expression */ - } - else - { - /* - * We're deparsing a Plan tree so we don't have complete - * RTE entries (in particular, rte->subquery is NULL). 
But - * the only place we'd see a Var directly referencing a - * SUBQUERY RTE is in a SubqueryScan plan node, and we can - * look into the child plan's tlist instead. - */ - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - if (!dpns->inner_plan) - elog(ERROR, "failed to find plan for subquery %s", - rte->eref->aliasname); - tle = get_tle_by_resno(dpns->inner_tlist, attnum); - if (!tle) - elog(ERROR, "bogus varattno for subquery var: %d", - attnum); - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_plan, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - } - break; - case RTE_JOIN: - /* Join RTE --- recursively inspect the alias variable */ - if (rte->joinaliasvars == NIL) - elog(ERROR, "cannot decompile join alias var in plan tree"); - Assert(attnum > 0 && attnum <= list_length(rte->joinaliasvars)); - expr = (Node *) list_nth(rte->joinaliasvars, attnum - 1); - Assert(expr != NULL); - /* we intentionally don't strip implicit coercions here */ - if (IsA(expr, Var)) - return get_name_for_var_field((Var *) expr, fieldno, - var->varlevelsup + levelsup, - context); - /* else fall through to inspect the expression */ - break; - case RTE_FUNCTION: - case RTE_TABLEFUNC: - - /* - * We couldn't get here unless a function is declared with one of - * its result columns as RECORD, which is not allowed. - */ - break; - case RTE_CTE: - /* CTE reference: examine subquery's output expr */ - { - CommonTableExpr *cte = NULL; - Index ctelevelsup; - ListCell *lc; - - /* - * Try to find the referenced CTE using the namespace stack. - */ - ctelevelsup = rte->ctelevelsup + netlevelsup; - if (ctelevelsup >= list_length(context->namespaces)) - lc = NULL; - else - { - deparse_namespace *ctedpns; - - ctedpns = (deparse_namespace *) - list_nth(context->namespaces, ctelevelsup); - foreach(lc, ctedpns->ctes) - { - cte = (CommonTableExpr *) lfirst(lc); - if (strcmp(cte->ctename, rte->ctename) == 0) - break; - } - } - if (lc != NULL) - { - Query *ctequery = (Query *) cte->ctequery; - TargetEntry *ste = get_tle_by_resno(GetCTETargetList(cte), - attnum); - - if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", - rte->eref->aliasname, attnum); - expr = (Node *) ste->expr; - if (IsA(expr, Var)) - { - /* - * Recurse into the CTE to see what its Var refers to. - * We have to build an additional level of namespace - * to keep in step with varlevelsup in the CTE. - * Furthermore it could be an outer CTE, so we may - * have to delete some levels of namespace. - */ - List *save_nslist = context->namespaces; - List *new_nslist; - deparse_namespace mydpns; - const char *result; - - set_deparse_for_query(&mydpns, ctequery, - context->namespaces); - - new_nslist = list_copy_tail(context->namespaces, - ctelevelsup); - context->namespaces = lcons(&mydpns, new_nslist); - - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - - context->namespaces = save_nslist; - - return result; - } - /* else fall through to inspect the expression */ - } - else - { - /* - * We're deparsing a Plan tree so we don't have a CTE - * list. But the only places we'd see a Var directly - * referencing a CTE RTE are in CteScan or WorkTableScan - * plan nodes. For those cases, set_deparse_plan arranged - * for dpns->inner_plan to be the plan node that emits the - * CTE or RecursiveUnion result, and we can look at its - * tlist instead. 
- */
- TargetEntry *tle;
- deparse_namespace save_dpns;
- const char *result;
-
- if (!dpns->inner_plan)
- elog(ERROR, "failed to find plan for CTE %s",
- rte->eref->aliasname);
- tle = get_tle_by_resno(dpns->inner_tlist, attnum);
- if (!tle)
- elog(ERROR, "bogus varattno for subquery var: %d",
- attnum);
- Assert(netlevelsup == 0);
- push_child_plan(dpns, dpns->inner_plan, &save_dpns);
-
- result = get_name_for_var_field((Var *) tle->expr, fieldno,
- levelsup, context);
-
- pop_child_plan(dpns, &save_dpns);
- return result;
- }
- }
- break;
- }
-
- /*
- * We now have an expression we can't expand any more, so see if
- * get_expr_result_tupdesc() can do anything with it.
- */
- tupleDesc = get_expr_result_tupdesc(expr, false);
- /* Got the tupdesc, so we can extract the field name */
- Assert(fieldno >= 1 && fieldno <= tupleDesc->natts);
- return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname);
-}
-
-/*
- * Try to find the referenced expression for a PARAM_EXEC Param that might
- * reference a parameter supplied by an upper NestLoop or SubPlan plan node.
- *
- * If successful, return the expression and set *dpns_p and *ancestor_cell_p
- * appropriately for calling push_ancestor_plan(). If no referent can be
- * found, return NULL.
- */
-static Node *
-find_param_referent(Param *param, deparse_context *context,
- deparse_namespace **dpns_p, ListCell **ancestor_cell_p)
-{
- /* Initialize output parameters to prevent compiler warnings */
- *dpns_p = NULL;
- *ancestor_cell_p = NULL;
-
- /*
- * If it's a PARAM_EXEC parameter, look for a matching NestLoopParam or
- * SubPlan argument. This will necessarily be in some ancestor of the
- * current expression's Plan node.
- */
- if (param->paramkind == PARAM_EXEC)
- {
- deparse_namespace *dpns;
- Plan *child_plan;
- bool in_same_plan_level;
- ListCell *lc;
-
- dpns = (deparse_namespace *) linitial(context->namespaces);
- child_plan = dpns->plan;
- in_same_plan_level = true;
-
- foreach(lc, dpns->ancestors)
- {
- Node *ancestor = (Node *) lfirst(lc);
- ListCell *lc2;
-
- /*
- * NestLoops transmit params to their inner child only; also, once
- * we've crawled up out of a subplan, this couldn't possibly be
- * the right match.
- */
- if (IsA(ancestor, NestLoop) &&
- child_plan == innerPlan(ancestor) &&
- in_same_plan_level)
- {
- NestLoop *nl = (NestLoop *) ancestor;
-
- foreach(lc2, nl->nestParams)
- {
- NestLoopParam *nlp = (NestLoopParam *) lfirst(lc2);
-
- if (nlp->paramno == param->paramid)
- {
- /* Found a match, so return it */
- *dpns_p = dpns;
- *ancestor_cell_p = lc;
- return (Node *) nlp->paramval;
- }
- }
- }
-
- /*
- * Check to see if we're crawling up from a subplan.
- */
- if (IsA(ancestor, SubPlan))
- {
- SubPlan *subplan = (SubPlan *) ancestor;
- ListCell *lc3;
- ListCell *lc4;
-
- /* Matched subplan, so check its arguments */
- forboth(lc3, subplan->parParam, lc4, subplan->args)
- {
- int paramid = lfirst_int(lc3);
- Node *arg = (Node *) lfirst(lc4);
-
- if (paramid == param->paramid)
- {
- /*
- * Found a match, so return it. But, since Vars in
- * the arg are to be evaluated in the surrounding
- * context, we have to point to the next ancestor item
- * that is *not* a SubPlan.
- */
- ListCell *rest;
-
- for_each_cell(rest, dpns->ancestors,
- lnext(dpns->ancestors, lc))
- {
- Node *ancestor2 = (Node *) lfirst(rest);
-
- if (!IsA(ancestor2, SubPlan))
- {
- *dpns_p = dpns;
- *ancestor_cell_p = rest;
- return arg;
- }
- }
- elog(ERROR, "SubPlan cannot be outermost ancestor");
- }
- }
-
- /* We have emerged from a subplan.
*/ - in_same_plan_level = false; - - /* SubPlan isn't a kind of Plan, so skip the rest */ - continue; - } - - /* - * Check to see if we're emerging from an initplan of the current - * ancestor plan. Initplans never have any parParams, so no need - * to search that list, but we need to know if we should reset - * in_same_plan_level. - */ - foreach(lc2, ((Plan *) ancestor)->initPlan) - { - SubPlan *subplan = lfirst_node(SubPlan, lc2); - - if (child_plan != (Plan *) list_nth(dpns->subplans, - subplan->plan_id - 1)) - continue; - - /* No parameters to be had here. */ - Assert(subplan->parParam == NIL); - - /* We have emerged from an initplan. */ - in_same_plan_level = false; - break; - } - - /* No luck, crawl up to next ancestor */ - child_plan = (Plan *) ancestor; - } - } - - /* No referent found */ - return NULL; -} - -/* - * Display a Param appropriately. - */ -static void -get_parameter(Param *param, deparse_context *context) -{ - Node *expr; - deparse_namespace *dpns; - ListCell *ancestor_cell; - - /* - * If it's a PARAM_EXEC parameter, try to locate the expression from which - * the parameter was computed. Note that failing to find a referent isn't - * an error, since the Param might well be a subplan output rather than an - * input. - */ - expr = find_param_referent(param, context, &dpns, &ancestor_cell); - if (expr) - { - /* Found a match, so print it */ - deparse_namespace save_dpns; - bool save_varprefix; - bool need_paren; - - /* Switch attention to the ancestor plan node */ - push_ancestor_plan(dpns, ancestor_cell, &save_dpns); - - /* - * Force prefixing of Vars, since they won't belong to the relation - * being scanned in the original plan node. - */ - save_varprefix = context->varprefix; - context->varprefix = true; - - /* - * A Param's expansion is typically a Var, Aggref, GroupingFunc, or - * upper-level Param, which wouldn't need extra parentheses. - * Otherwise, insert parens to ensure the expression looks atomic. - */ - need_paren = !(IsA(expr, Var) || - IsA(expr, Aggref) || - IsA(expr, GroupingFunc) || - IsA(expr, Param)); - if (need_paren) - appendStringInfoChar(context->buf, '('); - - get_rule_expr(expr, context, false); - - if (need_paren) - appendStringInfoChar(context->buf, ')'); - - context->varprefix = save_varprefix; - - pop_ancestor_plan(dpns, &save_dpns); - - return; - } - - /* - * If it's an external parameter, see if the outermost namespace provides - * function argument names. - */ - if (param->paramkind == PARAM_EXTERN && context->namespaces != NIL) - { - dpns = llast(context->namespaces); - if (dpns->argnames && - param->paramid > 0 && - param->paramid <= dpns->numargs) - { - char *argname = dpns->argnames[param->paramid - 1]; - - if (argname) - { - bool should_qualify = false; - ListCell *lc; - - /* - * Qualify the parameter name if there are any other deparse - * namespaces with range tables. This avoids qualifying in - * trivial cases like "RETURN a + b", but makes it safe in all - * other cases. - */ - foreach(lc, context->namespaces) - { - deparse_namespace *dp_ns = lfirst(lc); - - if (list_length(dp_ns->rtable_names) > 0) - { - should_qualify = true; - break; - } - } - if (should_qualify) - { - appendStringInfoString(context->buf, quote_identifier(dpns->funcname)); - appendStringInfoChar(context->buf, '.'); - } - - appendStringInfoString(context->buf, quote_identifier(argname)); - return; - } - } - } - - /* - * Not PARAM_EXEC, or couldn't find referent: for base types just print $N. 
- * For parameters of non-built-in types (such as composite types), add a
- * cast so that the remote node can detect the type.
- */
- if (param->paramtype >= FirstNormalObjectId)
- {
- char *typeName = format_type_with_typemod(param->paramtype, param->paramtypmod);
-
- appendStringInfo(context->buf, "$%d::%s", param->paramid, typeName);
- }
- else
- {
- appendStringInfo(context->buf, "$%d", param->paramid);
- }
-}
-
-/*
- * get_simple_binary_op_name
- *
- * helper function for isSimpleNode
- * will return single char binary operator name, or NULL if it's not
- */
-static const char *
-get_simple_binary_op_name(OpExpr *expr)
-{
- List *args = expr->args;
-
- if (list_length(args) == 2)
- {
- /* binary operator */
- Node *arg1 = (Node *) linitial(args);
- Node *arg2 = (Node *) lsecond(args);
- const char *op;
-
- op = generate_operator_name(expr->opno, exprType(arg1), exprType(arg2));
- if (strlen(op) == 1)
- return op;
- }
- return NULL;
-}
-
-/*
- * isSimpleNode - check if given node is simple (doesn't need parenthesizing)
- *
- * true : simple in the context of parent node's type
- * false : not simple
- */
-static bool
-isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
-{
- if (!node)
- return false;
-
- switch (nodeTag(node))
- {
- case T_Var:
- case T_Const:
- case T_Param:
- case T_CoerceToDomainValue:
- case T_SetToDefault:
- case T_CurrentOfExpr:
- /* single words: always simple */
- return true;
-
- case T_SubscriptingRef:
- case T_ArrayExpr:
- case T_RowExpr:
- case T_CoalesceExpr:
- case T_MinMaxExpr:
- case T_SQLValueFunction:
- case T_XmlExpr:
- case T_NextValueExpr:
- case T_NullIfExpr:
- case T_Aggref:
- case T_GroupingFunc:
- case T_WindowFunc:
- case T_FuncExpr:
- /* function-like: name(..) or name[..] */
- return true;
-
- /* CASE keywords act as parentheses */
- case T_CaseExpr:
- return true;
-
- case T_FieldSelect:
-
- /*
- * appears simple since . has top precedence, unless parent is
- * T_FieldSelect itself!
- */ - return !IsA(parentNode, FieldSelect); - - case T_FieldStore: - - /* - * treat like FieldSelect (probably doesn't matter) - */ - return !IsA(parentNode, FieldStore); - - case T_CoerceToDomain: - /* maybe simple, check args */ - return isSimpleNode((Node *) ((CoerceToDomain *) node)->arg, - node, prettyFlags); - case T_RelabelType: - return isSimpleNode((Node *) ((RelabelType *) node)->arg, - node, prettyFlags); - case T_CoerceViaIO: - return isSimpleNode((Node *) ((CoerceViaIO *) node)->arg, - node, prettyFlags); - case T_ArrayCoerceExpr: - return isSimpleNode((Node *) ((ArrayCoerceExpr *) node)->arg, - node, prettyFlags); - case T_ConvertRowtypeExpr: - return isSimpleNode((Node *) ((ConvertRowtypeExpr *) node)->arg, - node, prettyFlags); - - case T_OpExpr: - { - /* depends on parent node type; needs further checking */ - if (prettyFlags & PRETTYFLAG_PAREN && IsA(parentNode, OpExpr)) - { - const char *op; - const char *parentOp; - bool is_lopriop; - bool is_hipriop; - bool is_lopriparent; - bool is_hipriparent; - - op = get_simple_binary_op_name((OpExpr *) node); - if (!op) - return false; - - /* We know only the basic operators + - and * / % */ - is_lopriop = (strchr("+-", *op) != NULL); - is_hipriop = (strchr("*/%", *op) != NULL); - if (!(is_lopriop || is_hipriop)) - return false; - - parentOp = get_simple_binary_op_name((OpExpr *) parentNode); - if (!parentOp) - return false; - - is_lopriparent = (strchr("+-", *parentOp) != NULL); - is_hipriparent = (strchr("*/%", *parentOp) != NULL); - if (!(is_lopriparent || is_hipriparent)) - return false; - - if (is_hipriop && is_lopriparent) - return true; /* op binds tighter than parent */ - - if (is_lopriop && is_hipriparent) - return false; - - /* - * Operators are same priority --- can skip parens only if - * we have (a - b) - c, not a - (b - c). - */ - if (node == (Node *) linitial(((OpExpr *) parentNode)->args)) - return true; - - return false; - } - /* else do the same stuff as for T_SubLink et al. 
*/ - } - /* FALLTHROUGH */ - - case T_SubLink: - case T_NullTest: - case T_BooleanTest: - case T_DistinctExpr: - switch (nodeTag(parentNode)) - { - case T_FuncExpr: - { - /* special handling for casts */ - CoercionForm type = ((FuncExpr *) parentNode)->funcformat; - - if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) - return false; - return true; /* own parentheses */ - } - case T_BoolExpr: /* lower precedence */ - case T_SubscriptingRef: /* other separators */ - case T_ArrayExpr: /* other separators */ - case T_RowExpr: /* other separators */ - case T_CoalesceExpr: /* own parentheses */ - case T_MinMaxExpr: /* own parentheses */ - case T_XmlExpr: /* own parentheses */ - case T_NullIfExpr: /* other separators */ - case T_Aggref: /* own parentheses */ - case T_GroupingFunc: /* own parentheses */ - case T_WindowFunc: /* own parentheses */ - case T_CaseExpr: /* other separators */ - return true; - default: - return false; - } - - case T_BoolExpr: - switch (nodeTag(parentNode)) - { - case T_BoolExpr: - if (prettyFlags & PRETTYFLAG_PAREN) - { - BoolExprType type; - BoolExprType parentType; - - type = ((BoolExpr *) node)->boolop; - parentType = ((BoolExpr *) parentNode)->boolop; - switch (type) - { - case NOT_EXPR: - case AND_EXPR: - if (parentType == AND_EXPR || parentType == OR_EXPR) - return true; - break; - case OR_EXPR: - if (parentType == OR_EXPR) - return true; - break; - } - } - return false; - case T_FuncExpr: - { - /* special handling for casts */ - CoercionForm type = ((FuncExpr *) parentNode)->funcformat; - - if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) - return false; - return true; /* own parentheses */ - } - case T_SubscriptingRef: /* other separators */ - case T_ArrayExpr: /* other separators */ - case T_RowExpr: /* other separators */ - case T_CoalesceExpr: /* own parentheses */ - case T_MinMaxExpr: /* own parentheses */ - case T_XmlExpr: /* own parentheses */ - case T_NullIfExpr: /* other separators */ - case T_Aggref: /* own parentheses */ - case T_GroupingFunc: /* own parentheses */ - case T_WindowFunc: /* own parentheses */ - case T_CaseExpr: /* other separators */ - return true; - default: - return false; - } - - default: - break; - } - /* those we don't know: in dubio complexo */ - return false; -} - -/* - * appendContextKeyword - append a keyword to buffer - * - * If prettyPrint is enabled, perform a line break, and adjust indentation. - * Otherwise, just append the keyword. - */ -static void -appendContextKeyword(deparse_context *context, const char *str, - int indentBefore, int indentAfter, int indentPlus) -{ - StringInfo buf = context->buf; - - if (PRETTY_INDENT(context)) - { - int indentAmount; - - context->indentLevel += indentBefore; - - /* remove any trailing spaces currently in the buffer ... */ - removeStringInfoSpaces(buf); - /* ... then add a newline and some spaces */ - appendStringInfoChar(buf, '\n'); - - if (context->indentLevel < PRETTYINDENT_LIMIT) - indentAmount = Max(context->indentLevel, 0) + indentPlus; - else - { - /* - * If we're indented more than PRETTYINDENT_LIMIT characters, try - * to conserve horizontal space by reducing the per-level - * indentation. For best results the scale factor here should - * divide all the indent amounts that get added to indentLevel - * (PRETTYINDENT_STD, etc). It's important that the indentation - * not grow unboundedly, else deeply-nested trees use O(N^2) - * whitespace; so we also wrap modulo PRETTYINDENT_LIMIT. 
- */ - indentAmount = PRETTYINDENT_LIMIT + - (context->indentLevel - PRETTYINDENT_LIMIT) / - (PRETTYINDENT_STD / 2); - indentAmount %= PRETTYINDENT_LIMIT; - /* scale/wrap logic affects indentLevel, but not indentPlus */ - indentAmount += indentPlus; - } - appendStringInfoSpaces(buf, indentAmount); - - appendStringInfoString(buf, str); - - context->indentLevel += indentAfter; - if (context->indentLevel < 0) - context->indentLevel = 0; - } - else - appendStringInfoString(buf, str); -} - -/* - * removeStringInfoSpaces - delete trailing spaces from a buffer. - * - * Possibly this should move to stringinfo.c at some point. - */ -static void -removeStringInfoSpaces(StringInfo str) -{ - while (str->len > 0 && str->data[str->len - 1] == ' ') - str->data[--(str->len)] = '\0'; -} - -/* - * get_rule_expr_paren - deparse expr using get_rule_expr, - * embracing the string with parentheses if necessary for prettyPrint. - * - * Never embrace if prettyFlags=0, because it's done in the calling node. - * - * Any node that does *not* embrace its argument node by sql syntax (with - * parentheses, non-operator keywords like CASE/WHEN/ON, or comma etc) should - * use get_rule_expr_paren instead of get_rule_expr so parentheses can be - * added. - */ -static void -get_rule_expr_paren(Node *node, deparse_context *context, - bool showimplicit, Node *parentNode) -{ - bool need_paren; - - need_paren = PRETTY_PAREN(context) && - !isSimpleNode(node, parentNode, context->prettyFlags); - - if (need_paren) - appendStringInfoChar(context->buf, '('); - - get_rule_expr(node, context, showimplicit); - - if (need_paren) - appendStringInfoChar(context->buf, ')'); -} - -/* ---------- - * get_rule_expr - Parse back an expression - * - * Note: showimplicit determines whether we display any implicit cast that - * is present at the top of the expression tree. It is a passed argument, - * not a field of the context struct, because we change the value as we - * recurse down into the expression. In general we suppress implicit casts - * when the result type is known with certainty (eg, the arguments of an - * OR must be boolean). We display implicit casts for arguments of functions - * and operators, since this is needed to be certain that the same function - * or operator will be chosen when the expression is re-parsed. - * ---------- - */ -static void -get_rule_expr(Node *node, deparse_context *context, - bool showimplicit) -{ - StringInfo buf = context->buf; - - if (node == NULL) - return; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - /* - * Each level of get_rule_expr must emit an indivisible term - * (parenthesized if necessary) to ensure result is reparsed into the same - * expression tree. The only exception is that when the input is a List, - * we emit the component items comma-separated with no surrounding - * decoration; this is convenient for most callers. 
- */ - switch (nodeTag(node)) - { - case T_Var: - (void) get_variable((Var *) node, 0, false, context); - break; - - case T_Const: - get_const_expr((Const *) node, context, 0); - break; - - case T_Param: - get_parameter((Param *) node, context); - break; - - case T_Aggref: - get_agg_expr((Aggref *) node, context, (Aggref *) node); - break; - - case T_GroupingFunc: - { - GroupingFunc *gexpr = (GroupingFunc *) node; - - appendStringInfoString(buf, "GROUPING("); - get_rule_expr((Node *) gexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_WindowFunc: - get_windowfunc_expr((WindowFunc *) node, context); - break; - - case T_SubscriptingRef: - { - SubscriptingRef *sbsref = (SubscriptingRef *) node; - bool need_parens; - - /* - * If the argument is a CaseTestExpr, we must be inside a - * FieldStore, ie, we are assigning to an element of an array - * within a composite column. Since we already punted on - * displaying the FieldStore's target information, just punt - * here too, and display only the assignment source - * expression. - */ - if (IsA(sbsref->refexpr, CaseTestExpr)) - { - Assert(sbsref->refassgnexpr); - get_rule_expr((Node *) sbsref->refassgnexpr, - context, showimplicit); - break; - } - - /* - * Parenthesize the argument unless it's a simple Var or a - * FieldSelect. (In particular, if it's another - * SubscriptingRef, we *must* parenthesize to avoid - * confusion.) - */ - need_parens = !IsA(sbsref->refexpr, Var) && - !IsA(sbsref->refexpr, FieldSelect); - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) sbsref->refexpr, context, showimplicit); - if (need_parens) - appendStringInfoChar(buf, ')'); - - /* - * If there's a refassgnexpr, we want to print the node in the - * format "container[subscripts] := refassgnexpr". This is - * not legal SQL, so decompilation of INSERT or UPDATE - * statements should always use processIndirection as part of - * the statement-level syntax. We should only see this when - * EXPLAIN tries to print the targetlist of a plan resulting - * from such a statement. - */ - if (sbsref->refassgnexpr) - { - Node *refassgnexpr; - - /* - * Use processIndirection to print this node's subscripts - * as well as any additional field selections or - * subscripting in immediate descendants. It returns the - * RHS expr that is actually being "assigned". 
- */ - refassgnexpr = processIndirection(node, context); - appendStringInfoString(buf, " := "); - get_rule_expr(refassgnexpr, context, showimplicit); - } - else - { - /* Just an ordinary container fetch, so print subscripts */ - printSubscripts(sbsref, context); - } - } - break; - - case T_FuncExpr: - get_func_expr((FuncExpr *) node, context, showimplicit); - break; - - case T_NamedArgExpr: - { - NamedArgExpr *na = (NamedArgExpr *) node; - - appendStringInfo(buf, "%s => ", quote_identifier(na->name)); - get_rule_expr((Node *) na->arg, context, showimplicit); - } - break; - - case T_OpExpr: - get_oper_expr((OpExpr *) node, context); - break; - - case T_DistinctExpr: - { - DistinctExpr *expr = (DistinctExpr *) node; - List *args = expr->args; - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg1, context, true, node); - appendStringInfoString(buf, " IS DISTINCT FROM "); - get_rule_expr_paren(arg2, context, true, node); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_NullIfExpr: - { - NullIfExpr *nullifexpr = (NullIfExpr *) node; - - appendStringInfoString(buf, "NULLIF("); - get_rule_expr((Node *) nullifexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_ScalarArrayOpExpr: - { - ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node; - List *args = expr->args; - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg1, context, true, node); - appendStringInfo(buf, " %s %s (", - generate_operator_name(expr->opno, - exprType(arg1), - get_base_element_type(exprType(arg2))), - expr->useOr ? "ANY" : "ALL"); - get_rule_expr_paren(arg2, context, true, node); - - /* - * There's inherent ambiguity in "x op ANY/ALL (y)" when y is - * a bare sub-SELECT. Since we're here, the sub-SELECT must - * be meant as a scalar sub-SELECT yielding an array value to - * be used in ScalarArrayOpExpr; but the grammar will - * preferentially interpret such a construct as an ANY/ALL - * SubLink. To prevent misparsing the output that way, insert - * a dummy coercion (which will be stripped by parse analysis, - * so no inefficiency is added in dump and reload). This is - * indeed most likely what the user wrote to get the construct - * accepted in the first place. 
- */ - if (IsA(arg2, SubLink) && - ((SubLink *) arg2)->subLinkType == EXPR_SUBLINK) - appendStringInfo(buf, "::%s", - format_type_with_typemod(exprType(arg2), - exprTypmod(arg2))); - appendStringInfoChar(buf, ')'); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_BoolExpr: - { - BoolExpr *expr = (BoolExpr *) node; - Node *first_arg = linitial(expr->args); - ListCell *arg; - - switch (expr->boolop) - { - case AND_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(first_arg, context, - false, node); - for_each_from(arg, expr->args, 1) - { - appendStringInfoString(buf, " AND "); - get_rule_expr_paren((Node *) lfirst(arg), context, - false, node); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - case OR_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(first_arg, context, - false, node); - for_each_from(arg, expr->args, 1) - { - appendStringInfoString(buf, " OR "); - get_rule_expr_paren((Node *) lfirst(arg), context, - false, node); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - case NOT_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - appendStringInfoString(buf, "NOT "); - get_rule_expr_paren(first_arg, context, - false, node); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - default: - elog(ERROR, "unrecognized boolop: %d", - (int) expr->boolop); - } - } - break; - - case T_SubLink: - get_sublink_expr((SubLink *) node, context); - break; - - case T_SubPlan: - { - SubPlan *subplan = (SubPlan *) node; - - /* - * We cannot see an already-planned subplan in rule deparsing, - * only while EXPLAINing a query plan. We don't try to - * reconstruct the original SQL, just reference the subplan - * that appears elsewhere in EXPLAIN's result. - */ - if (subplan->useHashTable) - appendStringInfo(buf, "(hashed %s)", subplan->plan_name); - else - appendStringInfo(buf, "(%s)", subplan->plan_name); - } - break; - - case T_AlternativeSubPlan: - { - AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; - ListCell *lc; - - /* - * This case cannot be reached in normal usage, since no - * AlternativeSubPlan can appear either in parsetrees or - * finished plan trees. We keep it just in case somebody - * wants to use this code to print planner data structures. - */ - appendStringInfoString(buf, "(alternatives: "); - foreach(lc, asplan->subplans) - { - SubPlan *splan = lfirst_node(SubPlan, lc); - - if (splan->useHashTable) - appendStringInfo(buf, "hashed %s", splan->plan_name); - else - appendStringInfoString(buf, splan->plan_name); - if (lnext(asplan->subplans, lc)) - appendStringInfoString(buf, " or "); - } - appendStringInfoChar(buf, ')'); - } - break; - - case T_FieldSelect: - { - FieldSelect *fselect = (FieldSelect *) node; - Node *arg = (Node *) fselect->arg; - int fno = fselect->fieldnum; - const char *fieldname; - bool need_parens; - - /* - * Parenthesize the argument unless it's an SubscriptingRef or - * another FieldSelect. Note in particular that it would be - * WRONG to not parenthesize a Var argument; simplicity is not - * the issue here, having the right number of names is. - */ - need_parens = !IsA(arg, SubscriptingRef) && - !IsA(arg, FieldSelect); - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr(arg, context, true); - if (need_parens) - appendStringInfoChar(buf, ')'); - - /* - * Get and print the field name. 
- */ - fieldname = get_name_for_var_field((Var *) arg, fno, - 0, context); - appendStringInfo(buf, ".%s", quote_identifier(fieldname)); - } - break; - - case T_FieldStore: - { - FieldStore *fstore = (FieldStore *) node; - bool need_parens; - - /* - * There is no good way to represent a FieldStore as real SQL, - * so decompilation of INSERT or UPDATE statements should - * always use processIndirection as part of the - * statement-level syntax. We should only get here when - * EXPLAIN tries to print the targetlist of a plan resulting - * from such a statement. The plan case is even harder than - * ordinary rules would be, because the planner tries to - * collapse multiple assignments to the same field or subfield - * into one FieldStore; so we can see a list of target fields - * not just one, and the arguments could be FieldStores - * themselves. We don't bother to try to print the target - * field names; we just print the source arguments, with a - * ROW() around them if there's more than one. This isn't - * terribly complete, but it's probably good enough for - * EXPLAIN's purposes; especially since anything more would be - * either hopelessly confusing or an even poorer - * representation of what the plan is actually doing. - */ - need_parens = (list_length(fstore->newvals) != 1); - if (need_parens) - appendStringInfoString(buf, "ROW("); - get_rule_expr((Node *) fstore->newvals, context, showimplicit); - if (need_parens) - appendStringInfoChar(buf, ')'); - } - break; - - case T_RelabelType: - { - RelabelType *relabel = (RelabelType *) node; - Node *arg = (Node *) relabel->arg; - - if (relabel->relabelformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - relabel->resulttype, - relabel->resulttypmod, - node); - } - } - break; - - case T_CoerceViaIO: - { - CoerceViaIO *iocoerce = (CoerceViaIO *) node; - Node *arg = (Node *) iocoerce->arg; - - if (iocoerce->coerceformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - iocoerce->resulttype, - -1, - node); - } - } - break; - - case T_ArrayCoerceExpr: - { - ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; - Node *arg = (Node *) acoerce->arg; - - if (acoerce->coerceformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - acoerce->resulttype, - acoerce->resulttypmod, - node); - } - } - break; - - case T_ConvertRowtypeExpr: - { - ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node; - Node *arg = (Node *) convert->arg; - - if (convert->convertformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - convert->resulttype, -1, - node); - } - } - break; - - case T_CollateExpr: - { - CollateExpr *collate = (CollateExpr *) node; - Node *arg = (Node *) collate->arg; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg, context, showimplicit, node); - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(collate->collOid)); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_CaseExpr: - { - CaseExpr *caseexpr = (CaseExpr *) 
node; - ListCell *temp; - - appendContextKeyword(context, "CASE", - 0, PRETTYINDENT_VAR, 0); - if (caseexpr->arg) - { - appendStringInfoChar(buf, ' '); - get_rule_expr((Node *) caseexpr->arg, context, true); - } - foreach(temp, caseexpr->args) - { - CaseWhen *when = (CaseWhen *) lfirst(temp); - Node *w = (Node *) when->expr; - - if (caseexpr->arg) - { - /* - * The parser should have produced WHEN clauses of the - * form "CaseTestExpr = RHS", possibly with an - * implicit coercion inserted above the CaseTestExpr. - * For accurate decompilation of rules it's essential - * that we show just the RHS. However in an - * expression that's been through the optimizer, the - * WHEN clause could be almost anything (since the - * equality operator could have been expanded into an - * inline function). If we don't recognize the form - * of the WHEN clause, just punt and display it as-is. - */ - if (IsA(w, OpExpr)) - { - List *args = ((OpExpr *) w)->args; - - if (list_length(args) == 2 && - IsA(strip_implicit_coercions(linitial(args)), - CaseTestExpr)) - w = (Node *) lsecond(args); - } - } - - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "WHEN ", - 0, 0, 0); - get_rule_expr(w, context, false); - appendStringInfoString(buf, " THEN "); - get_rule_expr((Node *) when->result, context, true); - } - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "ELSE ", - 0, 0, 0); - get_rule_expr((Node *) caseexpr->defresult, context, true); - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "END", - -PRETTYINDENT_VAR, 0, 0); - } - break; - - case T_CaseTestExpr: - { - /* - * Normally we should never get here, since for expressions - * that can contain this node type we attempt to avoid - * recursing to it. But in an optimized expression we might - * be unable to avoid that (see comments for CaseExpr). If we - * do see one, print it as CASE_TEST_EXPR. - */ - appendStringInfoString(buf, "CASE_TEST_EXPR"); - } - break; - - case T_ArrayExpr: - { - ArrayExpr *arrayexpr = (ArrayExpr *) node; - - appendStringInfoString(buf, "ARRAY["); - get_rule_expr((Node *) arrayexpr->elements, context, true); - appendStringInfoChar(buf, ']'); - - /* - * If the array isn't empty, we assume its elements are - * coerced to the desired type. If it's empty, though, we - * need an explicit coercion to the array type. - */ - if (arrayexpr->elements == NIL) - appendStringInfo(buf, "::%s", - format_type_with_typemod(arrayexpr->array_typeid, -1)); - } - break; - - case T_RowExpr: - { - RowExpr *rowexpr = (RowExpr *) node; - TupleDesc tupdesc = NULL; - ListCell *arg; - int i; - char *sep; - - /* - * If it's a named type and not RECORD, we may have to skip - * dropped columns and/or claim there are NULLs for added - * columns. - */ - if (rowexpr->row_typeid != RECORDOID) - { - tupdesc = lookup_rowtype_tupdesc(rowexpr->row_typeid, -1); - Assert(list_length(rowexpr->args) <= tupdesc->natts); - } - - /* - * SQL99 allows "ROW" to be omitted when there is more than - * one column, but for simplicity we always print it. 
- */ - appendStringInfoString(buf, "ROW("); - sep = ""; - i = 0; - foreach(arg, rowexpr->args) - { - Node *e = (Node *) lfirst(arg); - - if (tupdesc == NULL || - !TupleDescAttr(tupdesc, i)->attisdropped) - { - appendStringInfoString(buf, sep); - /* Whole-row Vars need special treatment here */ - get_rule_expr_toplevel(e, context, true); - sep = ", "; - } - i++; - } - if (tupdesc != NULL) - { - while (i < tupdesc->natts) - { - if (!TupleDescAttr(tupdesc, i)->attisdropped) - { - appendStringInfoString(buf, sep); - appendStringInfoString(buf, "NULL"); - sep = ", "; - } - i++; - } - - ReleaseTupleDesc(tupdesc); - } - appendStringInfoChar(buf, ')'); - if (rowexpr->row_format == COERCE_EXPLICIT_CAST) - appendStringInfo(buf, "::%s", - format_type_with_typemod(rowexpr->row_typeid, -1)); - } - break; - - case T_RowCompareExpr: - { - RowCompareExpr *rcexpr = (RowCompareExpr *) node; - - /* - * SQL99 allows "ROW" to be omitted when there is more than - * one column, but for simplicity we always print it. Within - * a ROW expression, whole-row Vars need special treatment, so - * use get_rule_list_toplevel. - */ - appendStringInfoString(buf, "(ROW("); - get_rule_list_toplevel(rcexpr->largs, context, true); - - /* - * We assume that the name of the first-column operator will - * do for all the rest too. This is definitely open to - * failure, eg if some but not all operators were renamed - * since the construct was parsed, but there seems no way to - * be perfect. - */ - appendStringInfo(buf, ") %s ROW(", - generate_operator_name(linitial_oid(rcexpr->opnos), - exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs)))); - get_rule_list_toplevel(rcexpr->rargs, context, true); - appendStringInfoString(buf, "))"); - } - break; - - case T_CoalesceExpr: - { - CoalesceExpr *coalesceexpr = (CoalesceExpr *) node; - - appendStringInfoString(buf, "COALESCE("); - get_rule_expr((Node *) coalesceexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_MinMaxExpr: - { - MinMaxExpr *minmaxexpr = (MinMaxExpr *) node; - - switch (minmaxexpr->op) - { - case IS_GREATEST: - appendStringInfoString(buf, "GREATEST("); - break; - case IS_LEAST: - appendStringInfoString(buf, "LEAST("); - break; - } - get_rule_expr((Node *) minmaxexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_SQLValueFunction: - { - SQLValueFunction *svf = (SQLValueFunction *) node; - - /* - * Note: this code knows that typmod for time, timestamp, and - * timestamptz just prints as integer. 
- */ - switch (svf->op) - { - case SVFOP_CURRENT_DATE: - appendStringInfoString(buf, "CURRENT_DATE"); - break; - case SVFOP_CURRENT_TIME: - appendStringInfoString(buf, "CURRENT_TIME"); - break; - case SVFOP_CURRENT_TIME_N: - appendStringInfo(buf, "CURRENT_TIME(%d)", svf->typmod); - break; - case SVFOP_CURRENT_TIMESTAMP: - appendStringInfoString(buf, "CURRENT_TIMESTAMP"); - break; - case SVFOP_CURRENT_TIMESTAMP_N: - appendStringInfo(buf, "CURRENT_TIMESTAMP(%d)", - svf->typmod); - break; - case SVFOP_LOCALTIME: - appendStringInfoString(buf, "LOCALTIME"); - break; - case SVFOP_LOCALTIME_N: - appendStringInfo(buf, "LOCALTIME(%d)", svf->typmod); - break; - case SVFOP_LOCALTIMESTAMP: - appendStringInfoString(buf, "LOCALTIMESTAMP"); - break; - case SVFOP_LOCALTIMESTAMP_N: - appendStringInfo(buf, "LOCALTIMESTAMP(%d)", - svf->typmod); - break; - case SVFOP_CURRENT_ROLE: - appendStringInfoString(buf, "CURRENT_ROLE"); - break; - case SVFOP_CURRENT_USER: - appendStringInfoString(buf, "CURRENT_USER"); - break; - case SVFOP_USER: - appendStringInfoString(buf, "USER"); - break; - case SVFOP_SESSION_USER: - appendStringInfoString(buf, "SESSION_USER"); - break; - case SVFOP_CURRENT_CATALOG: - appendStringInfoString(buf, "CURRENT_CATALOG"); - break; - case SVFOP_CURRENT_SCHEMA: - appendStringInfoString(buf, "CURRENT_SCHEMA"); - break; - } - } - break; - - case T_XmlExpr: - { - XmlExpr *xexpr = (XmlExpr *) node; - bool needcomma = false; - ListCell *arg; - ListCell *narg; - Const *con; - - switch (xexpr->op) - { - case IS_XMLCONCAT: - appendStringInfoString(buf, "XMLCONCAT("); - break; - case IS_XMLELEMENT: - appendStringInfoString(buf, "XMLELEMENT("); - break; - case IS_XMLFOREST: - appendStringInfoString(buf, "XMLFOREST("); - break; - case IS_XMLPARSE: - appendStringInfoString(buf, "XMLPARSE("); - break; - case IS_XMLPI: - appendStringInfoString(buf, "XMLPI("); - break; - case IS_XMLROOT: - appendStringInfoString(buf, "XMLROOT("); - break; - case IS_XMLSERIALIZE: - appendStringInfoString(buf, "XMLSERIALIZE("); - break; - case IS_DOCUMENT: - break; - } - if (xexpr->op == IS_XMLPARSE || xexpr->op == IS_XMLSERIALIZE) - { - if (xexpr->xmloption == XMLOPTION_DOCUMENT) - appendStringInfoString(buf, "DOCUMENT "); - else - appendStringInfoString(buf, "CONTENT "); - } - if (xexpr->name) - { - appendStringInfo(buf, "NAME %s", - quote_identifier(map_xml_name_to_sql_identifier(xexpr->name))); - needcomma = true; - } - if (xexpr->named_args) - { - if (xexpr->op != IS_XMLFOREST) - { - if (needcomma) - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, "XMLATTRIBUTES("); - needcomma = false; - } - forboth(arg, xexpr->named_args, narg, xexpr->arg_names) - { - Node *e = (Node *) lfirst(arg); - char *argname = strVal(lfirst(narg)); - - if (needcomma) - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) e, context, true); - appendStringInfo(buf, " AS %s", - quote_identifier(map_xml_name_to_sql_identifier(argname))); - needcomma = true; - } - if (xexpr->op != IS_XMLFOREST) - appendStringInfoChar(buf, ')'); - } - if (xexpr->args) - { - if (needcomma) - appendStringInfoString(buf, ", "); - switch (xexpr->op) - { - case IS_XMLCONCAT: - case IS_XMLELEMENT: - case IS_XMLFOREST: - case IS_XMLPI: - case IS_XMLSERIALIZE: - /* no extra decoration needed */ - get_rule_expr((Node *) xexpr->args, context, true); - break; - case IS_XMLPARSE: - Assert(list_length(xexpr->args) == 2); - - get_rule_expr((Node *) linitial(xexpr->args), - context, true); - - con = lsecond_node(Const, xexpr->args); - 
Assert(!con->constisnull); - if (DatumGetBool(con->constvalue)) - appendStringInfoString(buf, - " PRESERVE WHITESPACE"); - else - appendStringInfoString(buf, - " STRIP WHITESPACE"); - break; - case IS_XMLROOT: - Assert(list_length(xexpr->args) == 3); - - get_rule_expr((Node *) linitial(xexpr->args), - context, true); - - appendStringInfoString(buf, ", VERSION "); - con = (Const *) lsecond(xexpr->args); - if (IsA(con, Const) && - con->constisnull) - appendStringInfoString(buf, "NO VALUE"); - else - get_rule_expr((Node *) con, context, false); - - con = lthird_node(Const, xexpr->args); - if (con->constisnull) - /* suppress STANDALONE NO VALUE */ ; - else - { - switch (DatumGetInt32(con->constvalue)) - { - case XML_STANDALONE_YES: - appendStringInfoString(buf, - ", STANDALONE YES"); - break; - case XML_STANDALONE_NO: - appendStringInfoString(buf, - ", STANDALONE NO"); - break; - case XML_STANDALONE_NO_VALUE: - appendStringInfoString(buf, - ", STANDALONE NO VALUE"); - break; - default: - break; - } - } - break; - case IS_DOCUMENT: - get_rule_expr_paren((Node *) xexpr->args, context, false, node); - break; - } - } - if (xexpr->op == IS_XMLSERIALIZE) - appendStringInfo(buf, " AS %s", - format_type_with_typemod(xexpr->type, - xexpr->typmod)); - if (xexpr->op == IS_DOCUMENT) - appendStringInfoString(buf, " IS DOCUMENT"); - else - appendStringInfoChar(buf, ')'); - } - break; - - case T_NullTest: - { - NullTest *ntest = (NullTest *) node; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren((Node *) ntest->arg, context, true, node); - - /* - * For scalar inputs, we prefer to print as IS [NOT] NULL, - * which is shorter and traditional. If it's a rowtype input - * but we're applying a scalar test, must print IS [NOT] - * DISTINCT FROM NULL to be semantically correct. 
- */ - if (ntest->argisrow || - !type_is_rowtype(exprType((Node *) ntest->arg))) - { - switch (ntest->nulltesttype) - { - case IS_NULL: - appendStringInfoString(buf, " IS NULL"); - break; - case IS_NOT_NULL: - appendStringInfoString(buf, " IS NOT NULL"); - break; - default: - elog(ERROR, "unrecognized nulltesttype: %d", - (int) ntest->nulltesttype); - } - } - else - { - switch (ntest->nulltesttype) - { - case IS_NULL: - appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL"); - break; - case IS_NOT_NULL: - appendStringInfoString(buf, " IS DISTINCT FROM NULL"); - break; - default: - elog(ERROR, "unrecognized nulltesttype: %d", - (int) ntest->nulltesttype); - } - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_BooleanTest: - { - BooleanTest *btest = (BooleanTest *) node; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren((Node *) btest->arg, context, false, node); - switch (btest->booltesttype) - { - case IS_TRUE: - appendStringInfoString(buf, " IS TRUE"); - break; - case IS_NOT_TRUE: - appendStringInfoString(buf, " IS NOT TRUE"); - break; - case IS_FALSE: - appendStringInfoString(buf, " IS FALSE"); - break; - case IS_NOT_FALSE: - appendStringInfoString(buf, " IS NOT FALSE"); - break; - case IS_UNKNOWN: - appendStringInfoString(buf, " IS UNKNOWN"); - break; - case IS_NOT_UNKNOWN: - appendStringInfoString(buf, " IS NOT UNKNOWN"); - break; - default: - elog(ERROR, "unrecognized booltesttype: %d", - (int) btest->booltesttype); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_CoerceToDomain: - { - CoerceToDomain *ctest = (CoerceToDomain *) node; - Node *arg = (Node *) ctest->arg; - - if (ctest->coercionformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr(arg, context, false); - } - else - { - get_coercion_expr(arg, context, - ctest->resulttype, - ctest->resulttypmod, - node); - } - } - break; - - case T_CoerceToDomainValue: - appendStringInfoString(buf, "VALUE"); - break; - - case T_SetToDefault: - appendStringInfoString(buf, "DEFAULT"); - break; - - case T_CurrentOfExpr: - { - CurrentOfExpr *cexpr = (CurrentOfExpr *) node; - - if (cexpr->cursor_name) - appendStringInfo(buf, "CURRENT OF %s", - quote_identifier(cexpr->cursor_name)); - else - appendStringInfo(buf, "CURRENT OF $%d", - cexpr->cursor_param); - } - break; - - case T_NextValueExpr: - { - NextValueExpr *nvexpr = (NextValueExpr *) node; - - /* - * This isn't exactly nextval(), but that seems close enough - * for EXPLAIN's purposes. - */ - appendStringInfoString(buf, "nextval("); - simple_quote_literal(buf, - generate_relation_name(nvexpr->seqid, - NIL)); - appendStringInfoChar(buf, ')'); - } - break; - - case T_InferenceElem: - { - InferenceElem *iexpr = (InferenceElem *) node; - bool save_varprefix; - bool need_parens; - - /* - * InferenceElem can only refer to target relation, so a - * prefix is not useful, and indeed would cause parse errors. - */ - save_varprefix = context->varprefix; - context->varprefix = false; - - /* - * Parenthesize the element unless it's a simple Var or a bare - * function call. Follows pg_get_indexdef_worker(). 
- */ - need_parens = !IsA(iexpr->expr, Var); - if (IsA(iexpr->expr, FuncExpr) && - ((FuncExpr *) iexpr->expr)->funcformat == - COERCE_EXPLICIT_CALL) - need_parens = false; - - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) iexpr->expr, - context, false); - if (need_parens) - appendStringInfoChar(buf, ')'); - - context->varprefix = save_varprefix; - - if (iexpr->infercollid) - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(iexpr->infercollid)); - - /* Add the operator class name, if not default */ - if (iexpr->inferopclass) - { - Oid inferopclass = iexpr->inferopclass; - Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass); - - get_opclass_name(inferopclass, inferopcinputtype, buf); - } - } - break; - - case T_PartitionBoundSpec: - { - PartitionBoundSpec *spec = (PartitionBoundSpec *) node; - ListCell *cell; - char *sep; - - if (spec->is_default) - { - appendStringInfoString(buf, "DEFAULT"); - break; - } - - switch (spec->strategy) - { - case PARTITION_STRATEGY_HASH: - Assert(spec->modulus > 0 && spec->remainder >= 0); - Assert(spec->modulus > spec->remainder); - - appendStringInfoString(buf, "FOR VALUES"); - appendStringInfo(buf, " WITH (modulus %d, remainder %d)", - spec->modulus, spec->remainder); - break; - - case PARTITION_STRATEGY_LIST: - Assert(spec->listdatums != NIL); - - appendStringInfoString(buf, "FOR VALUES IN ("); - sep = ""; - foreach(cell, spec->listdatums) - { - Const *val = lfirst_node(Const, cell); - - appendStringInfoString(buf, sep); - get_const_expr(val, context, -1); - sep = ", "; - } - - appendStringInfoChar(buf, ')'); - break; - - case PARTITION_STRATEGY_RANGE: - Assert(spec->lowerdatums != NIL && - spec->upperdatums != NIL && - list_length(spec->lowerdatums) == - list_length(spec->upperdatums)); - - appendStringInfo(buf, "FOR VALUES FROM %s TO %s", - get_range_partbound_string(spec->lowerdatums), - get_range_partbound_string(spec->upperdatums)); - break; - - default: - elog(ERROR, "unrecognized partition strategy: %d", - (int) spec->strategy); - break; - } - } - break; - - case T_List: - { - char *sep; - ListCell *l; - - sep = ""; - foreach(l, (List *) node) - { - appendStringInfoString(buf, sep); - get_rule_expr((Node *) lfirst(l), context, showimplicit); - sep = ", "; - } - } - break; - - case T_TableFunc: - get_tablefunc((TableFunc *) node, context, showimplicit); - break; - - case T_CallStmt: - get_proc_expr((CallStmt *) node, context, showimplicit); - break; - - default: - elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); - break; - } -} - -/* - * get_rule_expr_toplevel - Parse back a toplevel expression - * - * Same as get_rule_expr(), except that if the expr is just a Var, we pass - * istoplevel = true not false to get_variable(). This causes whole-row Vars - * to get printed with decoration that will prevent expansion of "*". - * We need to use this in contexts such as ROW() and VALUES(), where the - * parser would expand "foo.*" appearing at top level. (In principle we'd - * use this in get_target_list() too, but that has additional worries about - * whether to print AS, so it needs to invoke get_variable() directly anyway.) 
- */ -static void -get_rule_expr_toplevel(Node *node, deparse_context *context, - bool showimplicit) -{ - if (node && IsA(node, Var)) - (void) get_variable((Var *) node, 0, true, context); - else - get_rule_expr(node, context, showimplicit); -} - -/* - * get_rule_list_toplevel - Parse back a list of toplevel expressions - * - * Apply get_rule_expr_toplevel() to each element of a List. - * - * This adds commas between the expressions, but caller is responsible - * for printing surrounding decoration. - */ -static void -get_rule_list_toplevel(List *lst, deparse_context *context, - bool showimplicit) -{ - const char *sep; - ListCell *lc; - - sep = ""; - foreach(lc, lst) - { - Node *e = (Node *) lfirst(lc); - - appendStringInfoString(context->buf, sep); - get_rule_expr_toplevel(e, context, showimplicit); - sep = ", "; - } -} - -/* - * get_rule_expr_funccall - Parse back a function-call expression - * - * Same as get_rule_expr(), except that we guarantee that the output will - * look like a function call, or like one of the things the grammar treats as - * equivalent to a function call (see the func_expr_windowless production). - * This is needed in places where the grammar uses func_expr_windowless and - * you can't substitute a parenthesized a_expr. If what we have isn't going - * to look like a function call, wrap it in a dummy CAST() expression, which - * will satisfy the grammar --- and, indeed, is likely what the user wrote to - * produce such a thing. - */ -static void -get_rule_expr_funccall(Node *node, deparse_context *context, - bool showimplicit) -{ - if (looks_like_function(node)) - get_rule_expr(node, context, showimplicit); - else - { - StringInfo buf = context->buf; - - appendStringInfoString(buf, "CAST("); - /* no point in showing any top-level implicit cast */ - get_rule_expr(node, context, false); - appendStringInfo(buf, " AS %s)", - format_type_with_typemod(exprType(node), - exprTypmod(node))); - } -} - -/* - * Helper function to identify node types that satisfy func_expr_windowless. - * If in doubt, "false" is always a safe answer. 
- */ -static bool -looks_like_function(Node *node) -{ - if (node == NULL) - return false; /* probably shouldn't happen */ - switch (nodeTag(node)) - { - case T_FuncExpr: - /* OK, unless it's going to deparse as a cast */ - return (((FuncExpr *) node)->funcformat == COERCE_EXPLICIT_CALL || - ((FuncExpr *) node)->funcformat == COERCE_SQL_SYNTAX); - case T_NullIfExpr: - case T_CoalesceExpr: - case T_MinMaxExpr: - case T_SQLValueFunction: - case T_XmlExpr: - /* these are all accepted by func_expr_common_subexpr */ - return true; - default: - break; - } - return false; -} - -/* - * get_oper_expr - Parse back an OpExpr node - */ -static void -get_oper_expr(OpExpr *expr, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid opno = expr->opno; - List *args = expr->args; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - if (list_length(args) == 2) - { - /* binary operator */ - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - get_rule_expr_paren(arg1, context, true, (Node *) expr); - appendStringInfo(buf, " %s ", - generate_operator_name(opno, - exprType(arg1), - exprType(arg2))); - get_rule_expr_paren(arg2, context, true, (Node *) expr); - } - else - { - /* prefix operator */ - Node *arg = (Node *) linitial(args); - - appendStringInfo(buf, "%s ", - generate_operator_name(opno, - InvalidOid, - exprType(arg))); - get_rule_expr_paren(arg, context, true, (Node *) expr); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); -} - -/* - * get_func_expr - Parse back a FuncExpr node - */ -static void -get_func_expr(FuncExpr *expr, deparse_context *context, - bool showimplicit) -{ - StringInfo buf = context->buf; - Oid funcoid = expr->funcid; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - List *argnames; - bool use_variadic; - ListCell *l; - - /* - * If the function call came from an implicit coercion, then just show the - * first argument --- unless caller wants to see implicit coercions. - */ - if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit) - { - get_rule_expr_paren((Node *) linitial(expr->args), context, - false, (Node *) expr); - return; - } - - /* - * If the function call came from a cast, then show the first argument - * plus an explicit cast operation. - */ - if (expr->funcformat == COERCE_EXPLICIT_CAST || - expr->funcformat == COERCE_IMPLICIT_CAST) - { - Node *arg = linitial(expr->args); - Oid rettype = expr->funcresulttype; - int32 coercedTypmod; - - /* Get the typmod if this is a length-coercion function */ - (void) exprIsLengthCoercion((Node *) expr, &coercedTypmod); - - get_coercion_expr(arg, context, - rettype, coercedTypmod, - (Node *) expr); - - return; - } - - /* - * If the function was called using one of the SQL spec's random special - * syntaxes, try to reproduce that. If we don't recognize the function, - * fall through. - */ - if (expr->funcformat == COERCE_SQL_SYNTAX) - { - if (get_func_sql_syntax(expr, context)) - return; - } - - - /* - * Normal function: display as proname(args). First we need to extract - * the argument datatypes. 
- */
- if (list_length(expr->args) > FUNC_MAX_ARGS)
- ereport(ERROR,
- (errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("too many arguments")));
- nargs = 0;
- argnames = NIL;
- foreach(l, expr->args)
- {
- Node *arg = (Node *) lfirst(l);
-
- if (IsA(arg, NamedArgExpr))
- argnames = lappend(argnames, ((NamedArgExpr *) arg)->name);
- argtypes[nargs] = exprType(arg);
- nargs++;
- }
-
- appendStringInfo(buf, "%s(",
- generate_function_name(funcoid, nargs,
- argnames, argtypes,
- expr->funcvariadic,
- &use_variadic,
- context->inGroupBy));
- nargs = 0;
- foreach(l, expr->args)
- {
- if (nargs++ > 0)
- appendStringInfoString(buf, ", ");
- if (use_variadic && lnext(expr->args, l) == NULL)
- appendStringInfoString(buf, "VARIADIC ");
- get_rule_expr((Node *) lfirst(l), context, true);
- }
-
- appendStringInfoChar(buf, ')');
-}
-
-/*
- * get_proc_expr - Parse back a CallStmt node
- */
-static void
-get_proc_expr(CallStmt *stmt, deparse_context *context,
- bool showimplicit)
-{
- StringInfo buf = context->buf;
- Oid functionOid = stmt->funcexpr->funcid;
- bool use_variadic;
- Oid *argumentTypes;
- List *finalArgumentList = NIL;
- ListCell *argumentCell;
- List *namedArgList = NIL;
- int numberOfArgs = -1;
-
- if (!get_merged_argument_list(stmt, &namedArgList, &argumentTypes,
- &finalArgumentList, &numberOfArgs))
- {
- /* Nothing merged, i.e. no OUT arguments */
- get_func_expr((FuncExpr *) stmt->funcexpr, context, showimplicit);
- return;
- }
-
- appendStringInfo(buf, "%s(",
- generate_function_name(functionOid, numberOfArgs,
- namedArgList, argumentTypes,
- stmt->funcexpr->funcvariadic,
- &use_variadic,
- context->inGroupBy));
- int argNumber = 0;
- foreach(argumentCell, finalArgumentList)
- {
- if (argNumber++ > 0)
- appendStringInfoString(buf, ", ");
- if (use_variadic && lnext(finalArgumentList, argumentCell) == NULL)
- appendStringInfoString(buf, "VARIADIC ");
- get_rule_expr((Node *) lfirst(argumentCell), context, true);
- }
-
- appendStringInfoChar(buf, ')');
-}
-
-/*
- * get_agg_expr - Parse back an Aggref node
- */
-static void
-get_agg_expr(Aggref *aggref, deparse_context *context,
- Aggref *original_aggref)
-{
- StringInfo buf = context->buf;
- Oid argtypes[FUNC_MAX_ARGS];
- int nargs;
- bool use_variadic;
-
- /*
- * For a combining aggregate, we look up and deparse the corresponding
- * partial aggregate instead. This is necessary because our input
- * argument list has been replaced; the new argument list always has just
- * one element, which will point to a partial Aggref that supplies us with
- * transition states to combine.
- */
- if (DO_AGGSPLIT_COMBINE(aggref->aggsplit))
- {
- TargetEntry *tle;
-
- Assert(list_length(aggref->args) == 1);
- tle = linitial_node(TargetEntry, aggref->args);
- resolve_special_varno((Node *) tle->expr, context,
- get_agg_combine_expr, original_aggref);
- return;
- }
-
- /*
- * Mark as PARTIAL, if appropriate. We look to the original aggref so as
- * to avoid printing this when recursing from the code just above.
- */
- if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit))
- appendStringInfoString(buf, "PARTIAL ");
-
- /* Extract the argument types as seen by the parser */
- nargs = get_aggregate_argtypes(aggref, argtypes);
-
- /* Print the aggregate name, schema-qualified if needed */
- appendStringInfo(buf, "%s(%s",
- generate_function_name(aggref->aggfnoid, nargs,
- NIL, argtypes,
- aggref->aggvariadic,
- &use_variadic,
- context->inGroupBy),
- (aggref->aggdistinct != NIL) ?
"DISTINCT " : ""); - - if (AGGKIND_IS_ORDERED_SET(aggref->aggkind)) - { - /* - * Ordered-set aggregates do not use "*" syntax. Also, we needn't - * worry about inserting VARIADIC. So we can just dump the direct - * args as-is. - */ - Assert(!aggref->aggvariadic); - get_rule_expr((Node *) aggref->aggdirectargs, context, true); - Assert(aggref->aggorder != NIL); - appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY "); - get_rule_orderby(aggref->aggorder, aggref->args, false, context); - } - else - { - /* aggstar can be set only in zero-argument aggregates */ - if (aggref->aggstar) - appendStringInfoChar(buf, '*'); - else - { - ListCell *l; - int i; - - i = 0; - foreach(l, aggref->args) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - Node *arg = (Node *) tle->expr; - - Assert(!IsA(arg, NamedArgExpr)); - if (tle->resjunk) - continue; - if (i++ > 0) - appendStringInfoString(buf, ", "); - if (use_variadic && i == nargs) - appendStringInfoString(buf, "VARIADIC "); - get_rule_expr(arg, context, true); - } - } - - if (aggref->aggorder != NIL) - { - appendStringInfoString(buf, " ORDER BY "); - get_rule_orderby(aggref->aggorder, aggref->args, false, context); - } - } - - if (aggref->aggfilter != NULL) - { - appendStringInfoString(buf, ") FILTER (WHERE "); - get_rule_expr((Node *) aggref->aggfilter, context, false); - } - - appendStringInfoChar(buf, ')'); -} - -/* - * This is a helper function for get_agg_expr(). It's used when we deparse - * a combining Aggref; resolve_special_varno locates the corresponding partial - * Aggref and then calls this. - */ -static void -get_agg_combine_expr(Node *node, deparse_context *context, void *callback_arg) -{ - Aggref *aggref; - Aggref *original_aggref = callback_arg; - - if (!IsA(node, Aggref)) - elog(ERROR, "combining Aggref does not point to an Aggref"); - - aggref = (Aggref *) node; - get_agg_expr(aggref, context, original_aggref); -} - -/* - * get_windowfunc_expr - Parse back a WindowFunc node - */ -static void -get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - List *argnames; - ListCell *l; - - if (list_length(wfunc->args) > FUNC_MAX_ARGS) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg("too many arguments"))); - nargs = 0; - argnames = NIL; - foreach(l, wfunc->args) - { - Node *arg = (Node *) lfirst(l); - - if (IsA(arg, NamedArgExpr)) - argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); - argtypes[nargs] = exprType(arg); - nargs++; - } - - appendStringInfo(buf, "%s(", - generate_function_name(wfunc->winfnoid, nargs, - argnames, argtypes, - false, NULL, - context->inGroupBy)); - - /* winstar can be set only in zero-argument aggregates */ - if (wfunc->winstar) - appendStringInfoChar(buf, '*'); - else - get_rule_expr((Node *) wfunc->args, context, true); - - if (wfunc->aggfilter != NULL) - { - appendStringInfoString(buf, ") FILTER (WHERE "); - get_rule_expr((Node *) wfunc->aggfilter, context, false); - } - - appendStringInfoString(buf, ") OVER "); - - foreach(l, context->windowClause) - { - WindowClause *wc = (WindowClause *) lfirst(l); - - if (wc->winref == wfunc->winref) - { - if (wc->name) - appendStringInfoString(buf, quote_identifier(wc->name)); - else - get_rule_windowspec(wc, context->targetList, context); - break; - } - } - if (l == NULL) - { - if (context->windowClause) - elog(ERROR, "could not find window clause for winref %u", - wfunc->winref); - - /* - * In EXPLAIN, we don't have window context information available, 
so - * we have to settle for this: - */ - appendStringInfoString(buf, "(?)"); - } -} - -/* - * get_func_sql_syntax - Parse back a SQL-syntax function call - * - * Returns true if we successfully deparsed, false if we did not - * recognize the function. - */ -static bool -get_func_sql_syntax(FuncExpr *expr, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid funcoid = expr->funcid; - - switch (funcoid) - { - case F_TIMEZONE_INTERVAL_TIMESTAMP: - case F_TIMEZONE_INTERVAL_TIMESTAMPTZ: - case F_TIMEZONE_INTERVAL_TIMETZ: - case F_TIMEZONE_TEXT_TIMESTAMP: - case F_TIMEZONE_TEXT_TIMESTAMPTZ: - case F_TIMEZONE_TEXT_TIMETZ: - /* AT TIME ZONE ... note reversed argument order */ - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) lsecond(expr->args), context, false); - appendStringInfoString(buf, " AT TIME ZONE "); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoChar(buf, ')'); - return true; - - case F_OVERLAPS_TIMESTAMPTZ_INTERVAL_TIMESTAMPTZ_INTERVAL: - case F_OVERLAPS_TIMESTAMPTZ_INTERVAL_TIMESTAMPTZ_TIMESTAMPTZ: - case F_OVERLAPS_TIMESTAMPTZ_TIMESTAMPTZ_TIMESTAMPTZ_INTERVAL: - case F_OVERLAPS_TIMESTAMPTZ_TIMESTAMPTZ_TIMESTAMPTZ_TIMESTAMPTZ: - case F_OVERLAPS_TIMESTAMP_INTERVAL_TIMESTAMP_INTERVAL: - case F_OVERLAPS_TIMESTAMP_INTERVAL_TIMESTAMP_TIMESTAMP: - case F_OVERLAPS_TIMESTAMP_TIMESTAMP_TIMESTAMP_INTERVAL: - case F_OVERLAPS_TIMESTAMP_TIMESTAMP_TIMESTAMP_TIMESTAMP: - case F_OVERLAPS_TIMETZ_TIMETZ_TIMETZ_TIMETZ: - case F_OVERLAPS_TIME_INTERVAL_TIME_INTERVAL: - case F_OVERLAPS_TIME_INTERVAL_TIME_TIME: - case F_OVERLAPS_TIME_TIME_TIME_INTERVAL: - case F_OVERLAPS_TIME_TIME_TIME_TIME: - /* (x1, x2) OVERLAPS (y1, y2) */ - appendStringInfoString(buf, "(("); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) lsecond(expr->args), context, false); - appendStringInfoString(buf, ") OVERLAPS ("); - get_rule_expr((Node *) lthird(expr->args), context, false); - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) lfourth(expr->args), context, false); - appendStringInfoString(buf, "))"); - return true; - - case F_EXTRACT_TEXT_DATE: - case F_EXTRACT_TEXT_TIME: - case F_EXTRACT_TEXT_TIMETZ: - case F_EXTRACT_TEXT_TIMESTAMP: - case F_EXTRACT_TEXT_TIMESTAMPTZ: - case F_EXTRACT_TEXT_INTERVAL: - /* EXTRACT (x FROM y) */ - appendStringInfoString(buf, "EXTRACT("); - { - Const *con = (Const *) linitial(expr->args); - - Assert(IsA(con, Const) && - con->consttype == TEXTOID && - !con->constisnull); - appendStringInfoString(buf, TextDatumGetCString(con->constvalue)); - } - appendStringInfoString(buf, " FROM "); - get_rule_expr((Node *) lsecond(expr->args), context, false); - appendStringInfoChar(buf, ')'); - return true; - - case F_IS_NORMALIZED: - /* IS xxx NORMALIZED */ - appendStringInfoString(buf, "(("); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoString(buf, ") IS"); - if (list_length(expr->args) == 2) - { - Const *con = (Const *) lsecond(expr->args); - - Assert(IsA(con, Const) && - con->consttype == TEXTOID && - !con->constisnull); - appendStringInfo(buf, " %s", - TextDatumGetCString(con->constvalue)); - } - appendStringInfoString(buf, " NORMALIZED)"); - return true; - - case F_PG_COLLATION_FOR: - /* COLLATION FOR */ - appendStringInfoString(buf, "COLLATION FOR ("); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoChar(buf, ')'); - return true; - - /* - * XXX EXTRACT, a/k/a date_part(), is intentionally not 
covered - * yet. Add it after we change the return type to numeric. - */ - - case F_NORMALIZE: - /* NORMALIZE() */ - appendStringInfoString(buf, "NORMALIZE("); - get_rule_expr((Node *) linitial(expr->args), context, false); - if (list_length(expr->args) == 2) - { - Const *con = (Const *) lsecond(expr->args); - - Assert(IsA(con, Const) && - con->consttype == TEXTOID && - !con->constisnull); - appendStringInfo(buf, ", %s", - TextDatumGetCString(con->constvalue)); - } - appendStringInfoChar(buf, ')'); - return true; - - case F_OVERLAY_BIT_BIT_INT4: - case F_OVERLAY_BIT_BIT_INT4_INT4: - case F_OVERLAY_BYTEA_BYTEA_INT4: - case F_OVERLAY_BYTEA_BYTEA_INT4_INT4: - case F_OVERLAY_TEXT_TEXT_INT4: - case F_OVERLAY_TEXT_TEXT_INT4_INT4: - /* OVERLAY() */ - appendStringInfoString(buf, "OVERLAY("); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoString(buf, " PLACING "); - get_rule_expr((Node *) lsecond(expr->args), context, false); - appendStringInfoString(buf, " FROM "); - get_rule_expr((Node *) lthird(expr->args), context, false); - if (list_length(expr->args) == 4) - { - appendStringInfoString(buf, " FOR "); - get_rule_expr((Node *) lfourth(expr->args), context, false); - } - appendStringInfoChar(buf, ')'); - return true; - - case F_POSITION_BIT_BIT: - case F_POSITION_BYTEA_BYTEA: - case F_POSITION_TEXT_TEXT: - /* POSITION() ... extra parens since args are b_expr not a_expr */ - appendStringInfoString(buf, "POSITION(("); - get_rule_expr((Node *) lsecond(expr->args), context, false); - appendStringInfoString(buf, ") IN ("); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoString(buf, "))"); - return true; - - case F_SUBSTRING_BIT_INT4: - case F_SUBSTRING_BIT_INT4_INT4: - case F_SUBSTRING_BYTEA_INT4: - case F_SUBSTRING_BYTEA_INT4_INT4: - case F_SUBSTRING_TEXT_INT4: - case F_SUBSTRING_TEXT_INT4_INT4: - /* SUBSTRING FROM/FOR (i.e., integer-position variants) */ - appendStringInfoString(buf, "SUBSTRING("); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoString(buf, " FROM "); - get_rule_expr((Node *) lsecond(expr->args), context, false); - if (list_length(expr->args) == 3) - { - appendStringInfoString(buf, " FOR "); - get_rule_expr((Node *) lthird(expr->args), context, false); - } - appendStringInfoChar(buf, ')'); - return true; - - case F_SUBSTRING_TEXT_TEXT_TEXT: - /* SUBSTRING SIMILAR/ESCAPE */ - appendStringInfoString(buf, "SUBSTRING("); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoString(buf, " SIMILAR "); - get_rule_expr((Node *) lsecond(expr->args), context, false); - appendStringInfoString(buf, " ESCAPE "); - get_rule_expr((Node *) lthird(expr->args), context, false); - appendStringInfoChar(buf, ')'); - return true; - - case F_BTRIM_BYTEA_BYTEA: - case F_BTRIM_TEXT: - case F_BTRIM_TEXT_TEXT: - /* TRIM() */ - appendStringInfoString(buf, "TRIM(BOTH"); - if (list_length(expr->args) == 2) - { - appendStringInfoChar(buf, ' '); - get_rule_expr((Node *) lsecond(expr->args), context, false); - } - appendStringInfoString(buf, " FROM "); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoChar(buf, ')'); - return true; - - case F_LTRIM_BYTEA_BYTEA: - case F_LTRIM_TEXT: - case F_LTRIM_TEXT_TEXT: - /* TRIM() */ - appendStringInfoString(buf, "TRIM(LEADING"); - if (list_length(expr->args) == 2) - { - appendStringInfoChar(buf, ' '); - get_rule_expr((Node *) lsecond(expr->args), context, false); - } - appendStringInfoString(buf, " FROM "); - 
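Several of these SQL-syntax forms print their arguments in a different order than the underlying function receives them: timezone(zone, ts) deparses as ts AT TIME ZONE zone, and position(str, sub) as POSITION(sub IN str). A standalone sketch of that reordering, operating on plain strings instead of expression trees (illustrative only):

#include <stdio.h>

/* timezone(zone, ts): the deparser prints the second argument first */
static void
render_at_time_zone(const char *zone, const char *ts)
{
	printf("(%s AT TIME ZONE %s)\n", ts, zone);
}

/* position(str, sub): likewise reversed; the extra parentheses appear
 * because the operands are b_expr rather than a_expr in the grammar */
static void
render_position(const char *str, const char *sub)
{
	printf("POSITION((%s) IN (%s))\n", sub, str);
}

int
main(void)
{
	render_at_time_zone("'UTC'", "now()");	/* (now() AT TIME ZONE 'UTC') */
	render_position("'abcdef'", "'cd'");	/* POSITION(('cd') IN ('abcdef')) */
	return 0;
}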
get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoChar(buf, ')'); - return true; - - case F_RTRIM_BYTEA_BYTEA: - case F_RTRIM_TEXT: - case F_RTRIM_TEXT_TEXT: - /* TRIM() */ - appendStringInfoString(buf, "TRIM(TRAILING"); - if (list_length(expr->args) == 2) - { - appendStringInfoChar(buf, ' '); - get_rule_expr((Node *) lsecond(expr->args), context, false); - } - appendStringInfoString(buf, " FROM "); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoChar(buf, ')'); - return true; - - case F_XMLEXISTS: - /* XMLEXISTS ... extra parens because args are c_expr */ - appendStringInfoString(buf, "XMLEXISTS(("); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoString(buf, ") PASSING ("); - get_rule_expr((Node *) lsecond(expr->args), context, false); - appendStringInfoString(buf, "))"); - return true; - } - return false; -} - -/* ---------- - * get_coercion_expr - * - * Make a string representation of a value coerced to a specific type - * ---------- - */ -static void -get_coercion_expr(Node *arg, deparse_context *context, - Oid resulttype, int32 resulttypmod, - Node *parentNode) -{ - StringInfo buf = context->buf; - - /* - * Since parse_coerce.c doesn't immediately collapse application of - * length-coercion functions to constants, what we'll typically see in - * such cases is a Const with typmod -1 and a length-coercion function - * right above it. Avoid generating redundant output. However, beware of - * suppressing casts when the user actually wrote something like - * 'foo'::text::char(3). - * - * Note: it might seem that we are missing the possibility of needing to - * print a COLLATE clause for such a Const. However, a Const could only - * have nondefault collation in a post-constant-folding tree, in which the - * length coercion would have been folded too. See also the special - * handling of CollateExpr in coerce_to_target_type(): any collation - * marking will be above the coercion node, not below it. - */ - if (arg && IsA(arg, Const) && - ((Const *) arg)->consttype == resulttype && - ((Const *) arg)->consttypmod == -1) - { - /* Show the constant without normal ::typename decoration */ - get_const_expr((Const *) arg, context, -1); - } - else - { - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg, context, false, parentNode); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - appendStringInfo(buf, "::%s", - format_type_with_typemod(resulttype, resulttypmod)); -} - -/* ---------- - * get_const_expr - * - * Make a string representation of a Const - * - * showtype can be -1 to never show "::typename" decoration, or +1 to always - * show it, or 0 to show it only if the constant wouldn't be assumed to be - * the right type by default. - * - * If the Const's collation isn't default for its type, show that too. - * We mustn't do this when showtype is -1 (since that means the caller will - * print "::typename", and we can't put a COLLATE clause in between). It's - * caller's responsibility that collation isn't missed in such cases. - * ---------- - */ -static void -get_const_expr(Const *constval, deparse_context *context, int showtype) -{ - StringInfo buf = context->buf; - Oid typoutput; - bool typIsVarlena; - char *extval; - bool needlabel = false; - - if (constval->constisnull) - { - /* - * Always label the type of a NULL constant to prevent misdecisions - * about type when reparsing. 
- */ - appendStringInfoString(buf, "NULL"); - if (showtype >= 0) - { - appendStringInfo(buf, "::%s", - format_type_with_typemod(constval->consttype, - constval->consttypmod)); - get_const_collation(constval, context); - } - return; - } - - getTypeOutputInfo(constval->consttype, - &typoutput, &typIsVarlena); - - extval = OidOutputFunctionCall(typoutput, constval->constvalue); - - switch (constval->consttype) - { - case INT4OID: - - /* - * INT4 can be printed without any decoration, unless it is - * negative; in that case print it as '-nnn'::integer to ensure - * that the output will re-parse as a constant, not as a constant - * plus operator. In most cases we could get away with printing - * (-nnn) instead, because of the way that gram.y handles negative - * literals; but that doesn't work for INT_MIN, and it doesn't - * seem that much prettier anyway. - */ - if (extval[0] != '-') - appendStringInfoString(buf, extval); - else - { - appendStringInfo(buf, "'%s'", extval); - needlabel = true; /* we must attach a cast */ - } - break; - - case NUMERICOID: - - /* - * NUMERIC can be printed without quotes if it looks like a float - * constant (not an integer, and not Infinity or NaN) and doesn't - * have a leading sign (for the same reason as for INT4). - */ - if (isdigit((unsigned char) extval[0]) && - strcspn(extval, "eE.") != strlen(extval)) - { - appendStringInfoString(buf, extval); - } - else - { - appendStringInfo(buf, "'%s'", extval); - needlabel = true; /* we must attach a cast */ - } - break; - - case BITOID: - case VARBITOID: - appendStringInfo(buf, "B'%s'", extval); - break; - - case BOOLOID: - if (strcmp(extval, "t") == 0) - appendStringInfoString(buf, "true"); - else - appendStringInfoString(buf, "false"); - break; - - default: - simple_quote_literal(buf, extval); - break; - } - - pfree(extval); - - if (showtype < 0) - return; - - /* - * For showtype == 0, append ::typename unless the constant will be - * implicitly typed as the right type when it is read in. - * - * XXX this code has to be kept in sync with the behavior of the parser, - * especially make_const. - */ - switch (constval->consttype) - { - case BOOLOID: - case UNKNOWNOID: - /* These types can be left unlabeled */ - needlabel = false; - break; - case INT4OID: - /* We determined above whether a label is needed */ - break; - case NUMERICOID: - - /* - * Float-looking constants will be typed as numeric, which we - * checked above; but if there's a nondefault typmod we need to - * show it. - */ - needlabel |= (constval->consttypmod >= 0); - break; - default: - needlabel = true; - break; - } - if (needlabel || showtype > 0) - appendStringInfo(buf, "::%s", - format_type_with_typemod(constval->consttype, - constval->consttypmod)); - - get_const_collation(constval, context); -} - -/* - * helper for get_const_expr: append COLLATE if needed - */ -static void -get_const_collation(Const *constval, deparse_context *context) -{ - StringInfo buf = context->buf; - - if (OidIsValid(constval->constcollid)) - { - Oid typcollation = get_typcollation(constval->consttype); - - if (constval->constcollid != typcollation) - { - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(constval->constcollid)); - } - } -} - -/* - * simple_quote_literal - Format a string as a SQL literal, append to buf - */ -static void -simple_quote_literal(StringInfo buf, const char *val) -{ - const char *valptr; - - /* - * We form the string literal according to the prevailing setting of - * standard_conforming_strings; we never use E''. 
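The INT4 branch above shows why deparsed constants sometimes grow quotes and a cast: a bare leading minus would re-parse as a unary operator applied to a constant rather than as a single constant, and INT_MIN has no non-negative literal form at all. A minimal standalone sketch of just that branch (the helper name is hypothetical):

#include <stdio.h>

/* Non-negative integers can be emitted bare; a leading '-' forces the
 * quoted-and-casted form so the text re-parses as one constant. */
static void
render_int4_const(const char *extval)
{
	if (extval[0] != '-')
		printf("%s\n", extval);
	else
		printf("'%s'::integer\n", extval);
}

int
main(void)
{
	render_int4_const("42");	/* prints: 42 */
	render_int4_const("-42");	/* prints: '-42'::integer */
	return 0;
}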
User is responsible for - * making sure result is used correctly. - */ - appendStringInfoChar(buf, '\''); - for (valptr = val; *valptr; valptr++) - { - char ch = *valptr; - - if (SQL_STR_DOUBLE(ch, !standard_conforming_strings)) - appendStringInfoChar(buf, ch); - appendStringInfoChar(buf, ch); - } - appendStringInfoChar(buf, '\''); -} - -/* ---------- - * get_sublink_expr - Parse back a sublink - * ---------- - */ -static void -get_sublink_expr(SubLink *sublink, deparse_context *context) -{ - StringInfo buf = context->buf; - Query *query = (Query *) (sublink->subselect); - char *opname = NULL; - bool need_paren; - - if (sublink->subLinkType == ARRAY_SUBLINK) - appendStringInfoString(buf, "ARRAY("); - else - appendStringInfoChar(buf, '('); - - /* - * Note that we print the name of only the first operator, when there are - * multiple combining operators. This is an approximation that could go - * wrong in various scenarios (operators in different schemas, renamed - * operators, etc) but there is not a whole lot we can do about it, since - * the syntax allows only one operator to be shown. - */ - if (sublink->testexpr) - { - if (IsA(sublink->testexpr, OpExpr)) - { - /* single combining operator */ - OpExpr *opexpr = (OpExpr *) sublink->testexpr; - - get_rule_expr(linitial(opexpr->args), context, true); - opname = generate_operator_name(opexpr->opno, - exprType(linitial(opexpr->args)), - exprType(lsecond(opexpr->args))); - } - else if (IsA(sublink->testexpr, BoolExpr)) - { - /* multiple combining operators, = or <> cases */ - char *sep; - ListCell *l; - - appendStringInfoChar(buf, '('); - sep = ""; - foreach(l, ((BoolExpr *) sublink->testexpr)->args) - { - OpExpr *opexpr = lfirst_node(OpExpr, l); - - appendStringInfoString(buf, sep); - get_rule_expr(linitial(opexpr->args), context, true); - if (!opname) - opname = generate_operator_name(opexpr->opno, - exprType(linitial(opexpr->args)), - exprType(lsecond(opexpr->args))); - sep = ", "; - } - appendStringInfoChar(buf, ')'); - } - else if (IsA(sublink->testexpr, RowCompareExpr)) - { - /* multiple combining operators, < <= > >= cases */ - RowCompareExpr *rcexpr = (RowCompareExpr *) sublink->testexpr; - - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) rcexpr->largs, context, true); - opname = generate_operator_name(linitial_oid(rcexpr->opnos), - exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs))); - appendStringInfoChar(buf, ')'); - } - else - elog(ERROR, "unrecognized testexpr type: %d", - (int) nodeTag(sublink->testexpr)); - } - - need_paren = true; - - switch (sublink->subLinkType) - { - case EXISTS_SUBLINK: - appendStringInfoString(buf, "EXISTS "); - break; - - case ANY_SUBLINK: - if (strcmp(opname, "=") == 0) /* Represent = ANY as IN */ - appendStringInfoString(buf, " IN "); - else - appendStringInfo(buf, " %s ANY ", opname); - break; - - case ALL_SUBLINK: - appendStringInfo(buf, " %s ALL ", opname); - break; - - case ROWCOMPARE_SUBLINK: - appendStringInfo(buf, " %s ", opname); - break; - - case EXPR_SUBLINK: - case MULTIEXPR_SUBLINK: - case ARRAY_SUBLINK: - need_paren = false; - break; - - case CTE_SUBLINK: /* shouldn't occur in a SubLink */ - default: - elog(ERROR, "unrecognized sublink type: %d", - (int) sublink->subLinkType); - break; - } - - if (need_paren) - appendStringInfoChar(buf, '('); - - get_query_def(query, buf, context->namespaces, NULL, false, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - - if (need_paren) - appendStringInfoString(buf, "))"); - else - 
appendStringInfoChar(buf, ')'); -} - -/* ---------- - * get_tablefunc - Parse back a table function - * ---------- - */ -static void -get_tablefunc(TableFunc *tf, deparse_context *context, bool showimplicit) -{ - StringInfo buf = context->buf; - - /* XMLTABLE is the only existing implementation. */ - - appendStringInfoString(buf, "XMLTABLE("); - - if (tf->ns_uris != NIL) - { - ListCell *lc1, - *lc2; - bool first = true; - - appendStringInfoString(buf, "XMLNAMESPACES ("); - forboth(lc1, tf->ns_uris, lc2, tf->ns_names) - { - Node *expr = (Node *) lfirst(lc1); - char *name = strVal(lfirst(lc2)); - - if (!first) - appendStringInfoString(buf, ", "); - else - first = false; - - if (name != NULL) - { - get_rule_expr(expr, context, showimplicit); - appendStringInfo(buf, " AS %s", name); - } - else - { - appendStringInfoString(buf, "DEFAULT "); - get_rule_expr(expr, context, showimplicit); - } - } - appendStringInfoString(buf, "), "); - } - - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) tf->rowexpr, context, showimplicit); - appendStringInfoString(buf, ") PASSING ("); - get_rule_expr((Node *) tf->docexpr, context, showimplicit); - appendStringInfoChar(buf, ')'); - - if (tf->colexprs != NIL) - { - ListCell *l1; - ListCell *l2; - ListCell *l3; - ListCell *l4; - ListCell *l5; - int colnum = 0; - - appendStringInfoString(buf, " COLUMNS "); - forfive(l1, tf->colnames, l2, tf->coltypes, l3, tf->coltypmods, - l4, tf->colexprs, l5, tf->coldefexprs) - { - char *colname = strVal(lfirst(l1)); - Oid typid = lfirst_oid(l2); - int32 typmod = lfirst_int(l3); - Node *colexpr = (Node *) lfirst(l4); - Node *coldefexpr = (Node *) lfirst(l5); - bool ordinality = (tf->ordinalitycol == colnum); - bool notnull = bms_is_member(colnum, tf->notnulls); - - if (colnum > 0) - appendStringInfoString(buf, ", "); - colnum++; - - appendStringInfo(buf, "%s %s", quote_identifier(colname), - ordinality ? "FOR ORDINALITY" : - format_type_with_typemod(typid, typmod)); - if (ordinality) - continue; - - if (coldefexpr != NULL) - { - appendStringInfoString(buf, " DEFAULT ("); - get_rule_expr((Node *) coldefexpr, context, showimplicit); - appendStringInfoChar(buf, ')'); - } - if (colexpr != NULL) - { - appendStringInfoString(buf, " PATH ("); - get_rule_expr((Node *) colexpr, context, showimplicit); - appendStringInfoChar(buf, ')'); - } - if (notnull) - appendStringInfoString(buf, " NOT NULL"); - } - } - - appendStringInfoChar(buf, ')'); -} - -/* ---------- - * get_from_clause - Parse back a FROM clause - * - * "prefix" is the keyword that denotes the start of the list of FROM - * elements. It is FROM when used to parse back SELECT and UPDATE, but - * is USING when parsing back DELETE. - * ---------- - */ -static void -get_from_clause(Query *query, const char *prefix, deparse_context *context) -{ - StringInfo buf = context->buf; - bool first = true; - ListCell *l; - - /* - * We use the query's jointree as a guide to what to print. However, we - * must ignore auto-added RTEs that are marked not inFromCl. (These can - * only appear at the top level of the jointree, so it's sufficient to - * check here.) This check also ensures we ignore the rule pseudo-RTEs - * for NEW and OLD. 
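One subtle choice in get_sublink_expr above: an ANY sublink whose combining operator is = is printed using the equivalent IN syntax instead of = ANY. A standalone sketch of that rewrite (plain C, names invented; subqueries reduced to strings):

#include <stdio.h>
#include <string.h>

static void
render_any_sublink(const char *opname, const char *lhs, const char *subquery)
{
	if (strcmp(opname, "=") == 0)
		printf("(%s IN (%s))\n", lhs, subquery);
	else
		printf("(%s %s ANY (%s))\n", lhs, opname, subquery);
}

int
main(void)
{
	render_any_sublink("=", "x", "SELECT y FROM t");	/* (x IN (...)) */
	render_any_sublink("<>", "x", "SELECT y FROM t");	/* (x <> ANY (...)) */
	return 0;
}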
- */ - foreach(l, query->jointree->fromlist) - { - Node *jtnode = (Node *) lfirst(l); - - if (IsA(jtnode, RangeTblRef)) - { - int varno = ((RangeTblRef *) jtnode)->rtindex; - RangeTblEntry *rte = rt_fetch(varno, query->rtable); - - if (!rte->inFromCl) - continue; - } - - if (first) - { - appendContextKeyword(context, prefix, - -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); - first = false; - - get_from_clause_item(jtnode, query, context); - } - else - { - StringInfoData itembuf; - - appendStringInfoString(buf, ", "); - - /* - * Put the new FROM item's text into itembuf so we can decide - * after we've got it whether or not it needs to go on a new line. - */ - initStringInfo(&itembuf); - context->buf = &itembuf; - - get_from_clause_item(jtnode, query, context); - - /* Restore context's output buffer */ - context->buf = buf; - - /* Consider line-wrapping if enabled */ - if (PRETTY_INDENT(context) && context->wrapColumn >= 0) - { - /* Does the new item start with a new line? */ - if (itembuf.len > 0 && itembuf.data[0] == '\n') - { - /* If so, we shouldn't add anything */ - /* instead, remove any trailing spaces currently in buf */ - removeStringInfoSpaces(buf); - } - else - { - char *trailing_nl; - - /* Locate the start of the current line in the buffer */ - trailing_nl = strrchr(buf->data, '\n'); - if (trailing_nl == NULL) - trailing_nl = buf->data; - else - trailing_nl++; - - /* - * Add a newline, plus some indentation, if the new item - * would cause an overflow. - */ - if (strlen(trailing_nl) + itembuf.len > context->wrapColumn) - appendContextKeyword(context, "", -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_VAR); - } - } - - /* Add the new item */ - appendStringInfoString(buf, itembuf.data); - - /* clean up */ - pfree(itembuf.data); - } - } -} - -static void -get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); - - if (IsA(jtnode, RangeTblRef)) - { - int varno = ((RangeTblRef *) jtnode)->rtindex; - RangeTblEntry *rte = rt_fetch(varno, query->rtable); - char *refname = get_rtable_name(varno, context); - deparse_columns *colinfo = deparse_columns_fetch(varno, dpns); - RangeTblFunction *rtfunc1 = NULL; - bool printalias; - CitusRTEKind rteKind = GetRangeTblKind(rte); - - if (rte->lateral) - appendStringInfoString(buf, "LATERAL "); - - /* Print the FROM item proper */ - switch (rte->rtekind) - { - case RTE_RELATION: - /* Normal relation RTE */ - appendStringInfo(buf, "%s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, - context->namespaces)); - break; - case RTE_SUBQUERY: - /* Subquery RTE */ - appendStringInfoChar(buf, '('); - get_query_def(rte->subquery, buf, context->namespaces, NULL, - true, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - appendStringInfoChar(buf, ')'); - break; - case RTE_FUNCTION: - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "%s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, - fragmentTableName)); - break; - } - - /* Function RTE */ - rtfunc1 = (RangeTblFunction *) linitial(rte->functions); - - /* - * Omit ROWS FROM() syntax for just one 
function, unless it - * has both a coldeflist and WITH ORDINALITY. If it has both, - * we must use ROWS FROM() syntax to avoid ambiguity about - * whether the coldeflist includes the ordinality column. - */ - if (list_length(rte->functions) == 1 && - (rtfunc1->funccolnames == NIL || !rte->funcordinality)) - { - get_rule_expr_funccall(rtfunc1->funcexpr, context, true); - /* we'll print the coldeflist below, if it has one */ - } - else - { - bool all_unnest; - ListCell *lc; - - /* - * If all the function calls in the list are to unnest, - * and none need a coldeflist, then collapse the list back - * down to UNNEST(args). (If we had more than one - * built-in unnest function, this would get more - * difficult.) - * - * XXX This is pretty ugly, since it makes not-terribly- - * future-proof assumptions about what the parser would do - * with the output; but the alternative is to emit our - * nonstandard ROWS FROM() notation for what might have - * been a perfectly spec-compliant multi-argument - * UNNEST(). - */ - all_unnest = true; - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - - if (!IsA(rtfunc->funcexpr, FuncExpr) || - ((FuncExpr *) rtfunc->funcexpr)->funcid != F_UNNEST_ANYARRAY || - rtfunc->funccolnames != NIL) - { - all_unnest = false; - break; - } - } - - if (all_unnest) - { - List *allargs = NIL; - - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - List *args = ((FuncExpr *) rtfunc->funcexpr)->args; - - allargs = list_concat(allargs, args); - } - - appendStringInfoString(buf, "UNNEST("); - get_rule_expr((Node *) allargs, context, true); - appendStringInfoChar(buf, ')'); - } - else - { - int funcno = 0; - - appendStringInfoString(buf, "ROWS FROM("); - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - - if (funcno > 0) - appendStringInfoString(buf, ", "); - get_rule_expr_funccall(rtfunc->funcexpr, context, true); - if (rtfunc->funccolnames != NIL) - { - /* Reconstruct the column definition list */ - appendStringInfoString(buf, " AS "); - get_from_clause_coldeflist(rtfunc, - NULL, - context); - } - funcno++; - } - appendStringInfoChar(buf, ')'); - } - /* prevent printing duplicate coldeflist below */ - rtfunc1 = NULL; - } - if (rte->funcordinality) - appendStringInfoString(buf, " WITH ORDINALITY"); - break; - case RTE_TABLEFUNC: - get_tablefunc(rte->tablefunc, context, true); - break; - case RTE_VALUES: - /* Values list RTE */ - appendStringInfoChar(buf, '('); - get_values_def(rte->values_lists, context); - appendStringInfoChar(buf, ')'); - break; - case RTE_CTE: - appendStringInfoString(buf, quote_identifier(rte->ctename)); - break; - default: - elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind); - break; - } - - /* Print the relation alias, if needed */ - printalias = false; - if (rte->alias != NULL) - { - /* Always print alias if user provided one */ - printalias = true; - } - else if (colinfo->printaliases) - { - /* Always print alias if we need to print column aliases */ - printalias = true; - } - else if (rte->rtekind == RTE_RELATION) - { - /* - * No need to print alias if it's same as relation name (this - * would normally be the case, but not if set_rtable_names had to - * resolve a conflict). - */ - if (strcmp(refname, get_relation_name(rte->relid)) != 0) - printalias = true; - } - else if (rte->rtekind == RTE_FUNCTION) - { - /* - * For a function RTE, always print alias. 
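The all_unnest collapse above reduces to one rule: only when every entry in the function list is a plain unnest() call with no column definition list may the deparser fold the list back into the spec's multi-argument UNNEST form; otherwise it must emit ROWS FROM(). A standalone sketch of the check, with simplified stand-ins for the RangeTblFunction fields:

#include <stdio.h>

struct func_entry
{
	int is_unnest;			/* funcid == F_UNNEST_ANYARRAY in the real code */
	int has_coldeflist;		/* funccolnames != NIL in the real code */
};

static const char *
choose_syntax(const struct func_entry *funcs, int nfuncs)
{
	for (int i = 0; i < nfuncs; i++)
	{
		if (!funcs[i].is_unnest || funcs[i].has_coldeflist)
			return "ROWS FROM(f1(...), f2(...))";
	}
	return "UNNEST(arg1, arg2, ...)";
}

int
main(void)
{
	struct func_entry all_unnest[] = { { 1, 0 }, { 1, 0 } };
	struct func_entry mixed[] = { { 1, 0 }, { 0, 0 } };

	printf("%s\n", choose_syntax(all_unnest, 2));
	printf("%s\n", choose_syntax(mixed, 2));
	return 0;
}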
This covers possible - * renaming of the function and/or instability of the - * FigureColname rules for things that aren't simple functions. - * Note we'd need to force it anyway for the columndef list case. - */ - printalias = true; - } - else if (rte->rtekind == RTE_VALUES) - { - /* Alias is syntactically required for VALUES */ - printalias = true; - } - else if (rte->rtekind == RTE_CTE) - { - /* - * No need to print alias if it's same as CTE name (this would - * normally be the case, but not if set_rtable_names had to - * resolve a conflict). - */ - if (strcmp(refname, rte->ctename) != 0) - printalias = true; - } - else if (rte->rtekind == RTE_SUBQUERY) - { - /* subquery requires alias too */ - printalias = true; - } - if (printalias) - appendStringInfo(buf, " %s", quote_identifier(refname)); - - /* Print the column definitions or aliases, if needed */ - if (rtfunc1 && rtfunc1->funccolnames != NIL) - { - /* Reconstruct the columndef list, which is also the aliases */ - get_from_clause_coldeflist(rtfunc1, colinfo, context); - } - else if (GetRangeTblKind(rte) != CITUS_RTE_SHARD || - (rte->alias != NULL && rte->alias->colnames != NIL)) - { - /* Else print column aliases as needed */ - get_column_alias_list(colinfo, context); - } - /* check if columns are given aliases in distributed tables */ - else if (colinfo->parentUsing != NIL) - { - Assert(colinfo->printaliases); - get_column_alias_list(colinfo, context); - } - - /* Tablesample clause must go after any alias */ - if ((rteKind == CITUS_RTE_RELATION || rteKind == CITUS_RTE_SHARD) && - rte->tablesample) - { - get_tablesample_def(rte->tablesample, context); - } - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); - bool need_paren_on_right; - - need_paren_on_right = PRETTY_PAREN(context) && - !IsA(j->rarg, RangeTblRef) && - !(IsA(j->rarg, JoinExpr) && ((JoinExpr *) j->rarg)->alias != NULL); - - if (!PRETTY_PAREN(context) || j->alias != NULL) - appendStringInfoChar(buf, '('); - - get_from_clause_item(j->larg, query, context); - - switch (j->jointype) - { - case JOIN_INNER: - if (j->quals) - appendContextKeyword(context, " JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - else - appendContextKeyword(context, " CROSS JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - case JOIN_LEFT: - appendContextKeyword(context, " LEFT JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - case JOIN_FULL: - appendContextKeyword(context, " FULL JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - case JOIN_RIGHT: - appendContextKeyword(context, " RIGHT JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - default: - elog(ERROR, "unrecognized join type: %d", - (int) j->jointype); - } - - if (need_paren_on_right) - appendStringInfoChar(buf, '('); - get_from_clause_item(j->rarg, query, context); - if (need_paren_on_right) - appendStringInfoChar(buf, ')'); - - if (j->usingClause) - { - ListCell *lc; - bool first = true; - - appendStringInfoString(buf, " USING ("); - /* Use the assigned names, not what's in usingClause */ - foreach(lc, colinfo->usingNames) - { - char *colname = (char *) lfirst(lc); - - if (first) - first = false; - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, quote_identifier(colname)); - } - appendStringInfoChar(buf, ')'); - - if (j->join_using_alias) - appendStringInfo(buf, " AS %s", -
quote_identifier(j->join_using_alias->aliasname)); - } - else if (j->quals) - { - appendStringInfoString(buf, " ON "); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr(j->quals, context, false); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - else if (j->jointype != JOIN_INNER) - { - /* If we didn't say CROSS JOIN above, we must provide an ON */ - appendStringInfoString(buf, " ON TRUE"); - } - - if (!PRETTY_PAREN(context) || j->alias != NULL) - appendStringInfoChar(buf, ')'); - - /* Yes, it's correct to put alias after the right paren ... */ - if (j->alias != NULL) - { - /* - * Note that it's correct to emit an alias clause if and only if - * there was one originally. Otherwise we'd be converting a named - * join to unnamed or vice versa, which creates semantic - * subtleties we don't want. However, we might print a different - * alias name than was there originally. - */ - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(j->rtindex, - context))); - get_column_alias_list(colinfo, context); - } - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); -} - -/* - * get_column_alias_list - print column alias list for an RTE - * - * Caller must already have printed the relation's alias name. - */ -static void -get_column_alias_list(deparse_columns *colinfo, deparse_context *context) -{ - StringInfo buf = context->buf; - int i; - bool first = true; - - /* Don't print aliases if not needed */ - if (!colinfo->printaliases) - return; - - for (i = 0; i < colinfo->num_new_cols; i++) - { - char *colname = colinfo->new_colnames[i]; - - if (first) - { - appendStringInfoChar(buf, '('); - first = false; - } - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, quote_identifier(colname)); - } - if (!first) - appendStringInfoChar(buf, ')'); -} - -/* - * get_from_clause_coldeflist - reproduce FROM clause coldeflist - * - * When printing a top-level coldeflist (which is syntactically also the - * relation's column alias list), use column names from colinfo. But when - * printing a coldeflist embedded inside ROWS FROM(), we prefer to use the - * original coldeflist's names, which are available in rtfunc->funccolnames. - * Pass NULL for colinfo to select the latter behavior. - * - * The coldeflist is appended immediately (no space) to buf. Caller is - * responsible for ensuring that an alias or AS is present before it. 
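The USING branch just shown prints the assigned column names from colinfo rather than the raw usingClause, and emits an alias clause only when the original query had one. A string-level sketch of the output shape (standalone C, illustrative names):

#include <stdio.h>

static void
render_using_join(const char *larg, const char *rarg,
				  const char **usingNames, int ncols, const char *alias)
{
	printf("%s JOIN %s USING (", larg, rarg);
	for (int i = 0; i < ncols; i++)
		printf("%s%s", i > 0 ? ", " : "", usingNames[i]);
	printf(")");
	if (alias != NULL)
		printf(" AS %s", alias);
	printf("\n");
}

int
main(void)
{
	const char *cols[] = { "tenant_id", "id" };

	render_using_join("t1", "t2", cols, 2, NULL);	/* t1 JOIN t2 USING (tenant_id, id) */
	render_using_join("t1", "t2", cols, 2, "j");	/* ... USING (tenant_id, id) AS j */
	return 0;
}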
- */ -static void -get_from_clause_coldeflist(RangeTblFunction *rtfunc, - deparse_columns *colinfo, - deparse_context *context) -{ - StringInfo buf = context->buf; - ListCell *l1; - ListCell *l2; - ListCell *l3; - ListCell *l4; - int i; - - appendStringInfoChar(buf, '('); - - i = 0; - forfour(l1, rtfunc->funccoltypes, - l2, rtfunc->funccoltypmods, - l3, rtfunc->funccolcollations, - l4, rtfunc->funccolnames) - { - Oid atttypid = lfirst_oid(l1); - int32 atttypmod = lfirst_int(l2); - Oid attcollation = lfirst_oid(l3); - char *attname; - - if (colinfo) - attname = colinfo->colnames[i]; - else - attname = strVal(lfirst(l4)); - - Assert(attname); /* shouldn't be any dropped columns here */ - - if (i > 0) - appendStringInfoString(buf, ", "); - appendStringInfo(buf, "%s %s", - quote_identifier(attname), - format_type_with_typemod(atttypid, atttypmod)); - if (OidIsValid(attcollation) && - attcollation != get_typcollation(atttypid)) - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(attcollation)); - - i++; - } - - appendStringInfoChar(buf, ')'); -} - -/* - * get_tablesample_def - print a TableSampleClause - */ -static void -get_tablesample_def(TableSampleClause *tablesample, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid argtypes[1]; - int nargs; - ListCell *l; - - /* - * We should qualify the handler's function name if it wouldn't be - * resolved by lookup in the current search path. - */ - argtypes[0] = INTERNALOID; - appendStringInfo(buf, " TABLESAMPLE %s (", - generate_function_name(tablesample->tsmhandler, 1, - NIL, argtypes, - false, NULL, false)); - - nargs = 0; - foreach(l, tablesample->args) - { - if (nargs++ > 0) - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) lfirst(l), context, false); - } - appendStringInfoChar(buf, ')'); - - if (tablesample->repeatable != NULL) - { - appendStringInfoString(buf, " REPEATABLE ("); - get_rule_expr((Node *) tablesample->repeatable, context, false); - appendStringInfoChar(buf, ')'); - } -} - -/* - * get_opclass_name - fetch name of an index operator class - * - * The opclass name is appended (after a space) to buf. - * - * Output is suppressed if the opclass is the default for the given - * actual_datatype. (If you don't want this behavior, just pass - * InvalidOid for actual_datatype.) - */ -static void -get_opclass_name(Oid opclass, Oid actual_datatype, - StringInfo buf) -{ - HeapTuple ht_opc; - Form_pg_opclass opcrec; - char *opcname; - char *nspname; - - ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); - if (!HeapTupleIsValid(ht_opc)) - elog(ERROR, "cache lookup failed for opclass %u", opclass); - opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc); - - if (!OidIsValid(actual_datatype) || - GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) - { - /* Okay, we need the opclass name. Do we need to qualify it? */ - opcname = NameStr(opcrec->opcname); - if (OpclassIsVisible(opclass)) - appendStringInfo(buf, " %s", quote_identifier(opcname)); - else - { - nspname = get_namespace_name_or_temp(opcrec->opcnamespace); - appendStringInfo(buf, " %s.%s", - quote_identifier(nspname), - quote_identifier(opcname)); - } - } - ReleaseSysCache(ht_opc); -} - -/* - * processIndirection - take care of array and subfield assignment - * - * We strip any top-level FieldStore or assignment SubscriptingRef nodes that - * appear in the input, printing them as decoration for the base column - * name (which we assume the caller just printed). 
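The coldeflist loop above boils down to printing name-and-type pairs, appending COLLATE only when the collation differs from the type's default. A standalone sketch (plain C; the helper and its arguments are invented for illustration):

#include <stdio.h>
#include <string.h>

static void
render_coldef(const char *name, const char *type,
			  const char *collation, const char *type_default)
{
	printf("%s %s", name, type);
	if (collation != NULL && strcmp(collation, type_default) != 0)
		printf(" COLLATE %s", collation);
	printf("\n");
}

int
main(void)
{
	render_coldef("id", "integer", NULL, "");			/* id integer */
	render_coldef("val", "text", "\"C\"", "default");	/* val text COLLATE "C" */
	return 0;
}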
We might also need to - * strip CoerceToDomain nodes, but only ones that appear above assignment - * nodes. - * - * Returns the subexpression that's to be assigned. - */ -static Node * -processIndirection(Node *node, deparse_context *context) -{ - StringInfo buf = context->buf; - CoerceToDomain *cdomain = NULL; - - for (;;) - { - if (node == NULL) - break; - if (IsA(node, FieldStore)) - { - FieldStore *fstore = (FieldStore *) node; - Oid typrelid; - char *fieldname; - - /* lookup tuple type */ - typrelid = get_typ_typrelid(fstore->resulttype); - if (!OidIsValid(typrelid)) - elog(ERROR, "argument type %s of FieldStore is not a tuple type", - format_type_be(fstore->resulttype)); - - /* - * Print the field name. There should only be one target field in - * stored rules. There could be more than that in executable - * target lists, but this function cannot be used for that case. - */ - Assert(list_length(fstore->fieldnums) == 1); - fieldname = get_attname(typrelid, - linitial_int(fstore->fieldnums), false); - appendStringInfo(buf, ".%s", quote_identifier(fieldname)); - - /* - * We ignore arg since it should be an uninteresting reference to - * the target column or subcolumn. - */ - node = (Node *) linitial(fstore->newvals); - } - else if (IsA(node, SubscriptingRef)) - { - SubscriptingRef *sbsref = (SubscriptingRef *) node; - - if (sbsref->refassgnexpr == NULL) - break; - printSubscripts(sbsref, context); - - /* - * We ignore refexpr since it should be an uninteresting reference - * to the target column or subcolumn. - */ - node = (Node *) sbsref->refassgnexpr; - } - else if (IsA(node, CoerceToDomain)) - { - cdomain = (CoerceToDomain *) node; - /* If it's an explicit domain coercion, we're done */ - if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) - break; - /* Tentatively descend past the CoerceToDomain */ - node = (Node *) cdomain->arg; - } - else - break; - } - - /* - * If we descended past a CoerceToDomain whose argument turned out not to - * be a FieldStore or array assignment, back up to the CoerceToDomain. - * (This is not enough to be fully correct if there are nested implicit - * CoerceToDomains, but such cases shouldn't ever occur.) - */ - if (cdomain && node == (Node *) cdomain->arg) - node = (Node *) cdomain; - - return node; -} - -static void -printSubscripts(SubscriptingRef *sbsref, deparse_context *context) -{ - StringInfo buf = context->buf; - ListCell *lowlist_item; - ListCell *uplist_item; - - lowlist_item = list_head(sbsref->reflowerindexpr); /* could be NULL */ - foreach(uplist_item, sbsref->refupperindexpr) - { - appendStringInfoChar(buf, '['); - if (lowlist_item) - { - /* If subexpression is NULL, get_rule_expr prints nothing */ - get_rule_expr((Node *) lfirst(lowlist_item), context, false); - appendStringInfoChar(buf, ':'); - lowlist_item = lnext(sbsref->reflowerindexpr, lowlist_item); - } - /* If subexpression is NULL, get_rule_expr prints nothing */ - get_rule_expr((Node *) lfirst(uplist_item), context, false); - appendStringInfoChar(buf, ']'); - } -} - -/* - * get_relation_name - * Get the unqualified name of a relation specified by OID - * - * This differs from the underlying get_rel_name() function in that it will - * throw error instead of silently returning NULL if the OID is bad. 
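printSubscripts above pairs the upper and lower index lists so a slice prints as [lo:hi] while a plain subscript prints as [i]. A simplified standalone sketch using integers in place of expression subtrees (the real code also tolerates missing bounds within a slice):

#include <stdio.h>

static void
render_subscripts(const int *upper, const int *lower, int ndims)
{
	for (int i = 0; i < ndims; i++)
	{
		if (lower != NULL)
			printf("[%d:%d]", lower[i], upper[i]);
		else
			printf("[%d]", upper[i]);
	}
	printf("\n");
}

int
main(void)
{
	int up[] = { 3, 7 };
	int lo[] = { 1, 5 };

	render_subscripts(up, NULL, 2);		/* [3][7] */
	render_subscripts(up, lo, 2);		/* [1:3][5:7] */
	return 0;
}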
- */ -static char * -get_relation_name(Oid relid) -{ - char *relname = get_rel_name(relid); - - if (!relname) - elog(ERROR, "cache lookup failed for relation %u", relid); - return relname; -} - -/* - * generate_relation_or_shard_name - * Compute the name to display for a relation or shard - * - * If the provided relid is equal to the provided distrelid, this function - * returns a shard-extended relation name; otherwise, it falls through to a - * simple generate_relation_name call. - */ -static char * -generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid, - List *namespaces) -{ - char *relname = NULL; - - if (relid == distrelid) - { - relname = get_relation_name(relid); - - if (shardid > 0) - { - Oid schemaOid = get_rel_namespace(relid); - char *schemaName = get_namespace_name_or_temp(schemaOid); - - AppendShardIdToName(&relname, shardid); - - relname = quote_qualified_identifier(schemaName, relname); - } - } - else - { - relname = generate_relation_name(relid, namespaces); - } - - return relname; -} - -/* - * generate_relation_name - * Compute the name to display for a relation specified by OID - * - * The result includes all necessary quoting and schema-prefixing. - * - * If namespaces isn't NIL, it must be a list of deparse_namespace nodes. - * We will forcibly qualify the relation name if it equals any CTE name - * visible in the namespace list. - */ -char * -generate_relation_name(Oid relid, List *namespaces) -{ - HeapTuple tp; - Form_pg_class reltup; - bool need_qual; - ListCell *nslist; - char *relname; - char *nspname; - char *result; - - tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for relation %u", relid); - reltup = (Form_pg_class) GETSTRUCT(tp); - relname = NameStr(reltup->relname); - - /* Check for conflicting CTE name */ - need_qual = false; - foreach(nslist, namespaces) - { - deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist); - ListCell *ctlist; - - foreach(ctlist, dpns->ctes) - { - CommonTableExpr *cte = (CommonTableExpr *) lfirst(ctlist); - - if (strcmp(cte->ctename, relname) == 0) - { - need_qual = true; - break; - } - } - if (need_qual) - break; - } - - /* Otherwise, qualify the name if not visible in search path */ - if (!need_qual) - need_qual = !RelationIsVisible(relid); - - if (need_qual) - nspname = get_namespace_name_or_temp(reltup->relnamespace); - else - nspname = NULL; - - result = quote_qualified_identifier(nspname, relname); - - ReleaseSysCache(tp); - - return result; -} - -/* - * generate_rte_shard_name returns the qualified name of the shard given a - * CITUS_RTE_SHARD range table entry. - */ -static char * -generate_rte_shard_name(RangeTblEntry *rangeTableEntry) -{ - char *shardSchemaName = NULL; - char *shardTableName = NULL; - - Assert(GetRangeTblKind(rangeTableEntry) == CITUS_RTE_SHARD); - - ExtractRangeTblExtraData(rangeTableEntry, NULL, &shardSchemaName, &shardTableName, - NULL); - - return generate_fragment_name(shardSchemaName, shardTableName); -} - -/* - * generate_fragment_name - * Compute the name to display for a shard or merged table - * - * The result includes all necessary quoting and schema-prefixing. The schema - * name can be NULL for regular shards. For merged tables, they are always - * declared within a job-specific schema, and therefore can't have null schema - * names. 
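generate_relation_or_shard_name above only shard-extends the name when the relid matches the distributed relation being deparsed and a shard ID is present; a Citus shard name is the base relation name with the shard ID appended. A format-level sketch of the result, with identifier quoting omitted (the real code goes through AppendShardIdToName and quote_qualified_identifier):

#include <stdio.h>

static void
render_shard_name(const char *schema, const char *relname, long long shardid)
{
	if (shardid > 0)
		printf("%s.%s_%lld\n", schema, relname, shardid);
	else
		printf("%s.%s\n", schema, relname);
}

int
main(void)
{
	render_shard_name("public", "users", 102008);	/* public.users_102008 */
	render_shard_name("public", "users", 0);		/* public.users */
	return 0;
}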
- */ -static char * -generate_fragment_name(char *schemaName, char *tableName) -{ - StringInfo fragmentNameString = makeStringInfo(); - - if (schemaName != NULL) - { - appendStringInfo(fragmentNameString, "%s.%s", quote_identifier(schemaName), - quote_identifier(tableName)); - } - else - { - appendStringInfoString(fragmentNameString, quote_identifier(tableName)); - } - - return fragmentNameString->data; -} - -/* - * generate_function_name - * Compute the name to display for a function specified by OID, - * given that it is being called with the specified actual arg names and - * types. (Those matter because of ambiguous-function resolution rules.) - * - * If we're dealing with a potentially variadic function (in practice, this - * means a FuncExpr or Aggref, not some other way of calling a function), then - * has_variadic must specify whether variadic arguments have been merged, - * and *use_variadic_p will be set to indicate whether to print VARIADIC in - * the output. For non-FuncExpr cases, has_variadic should be false and - * use_variadic_p can be NULL. - * - * inGroupBy must be true if we're deparsing a GROUP BY clause. - * - * The result includes all necessary quoting and schema-prefixing. - */ -static char * -generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, - bool has_variadic, bool *use_variadic_p, - bool inGroupBy) -{ - char *result; - HeapTuple proctup; - Form_pg_proc procform; - char *proname; - bool use_variadic; - char *nspname; - FuncDetailCode p_result; - Oid p_funcid; - Oid p_rettype; - bool p_retset; - int p_nvargs; - Oid p_vatype; - Oid *p_true_typeids; - bool force_qualify = false; - - proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); - if (!HeapTupleIsValid(proctup)) - elog(ERROR, "cache lookup failed for function %u", funcid); - procform = (Form_pg_proc) GETSTRUCT(proctup); - proname = NameStr(procform->proname); - - /* - * Due to parser hacks to avoid needing to reserve CUBE, we need to force - * qualification of some function names within GROUP BY. - */ - if (inGroupBy) - { - if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0) - force_qualify = true; - } - - /* - * Determine whether VARIADIC should be printed. We must do this first - * since it affects the lookup rules in func_get_detail(). - * - * Currently, we always print VARIADIC if the function has a merged - * variadic-array argument. Note that this is always the case for - * functions taking a VARIADIC argument type other than VARIADIC ANY. - * - * In principle, if VARIADIC wasn't originally specified and the array - * actual argument is deconstructable, we could print the array elements - * separately and not print VARIADIC, thus more nearly reproducing the - * original input. For the moment that seems like too much complication - * for the benefit, and anyway we do not know whether VARIADIC was - * originally specified if it's a non-ANY type. - */ - if (use_variadic_p) - { - /* Parser should not have set funcvariadic unless fn is variadic */ - Assert(!has_variadic || OidIsValid(procform->provariadic)); - use_variadic = has_variadic; - *use_variadic_p = use_variadic; - } - else - { - Assert(!has_variadic); - use_variadic = false; - } - - /* - * The idea here is to schema-qualify only if the parser would fail to - * resolve the correct function given the unqualified func name with the - * specified argtypes and VARIADIC flag. But if we already decided to - * force qualification, then we can skip the lookup and pretend we didn't - * find it. 
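The cube/rollup special case above exists because inside GROUP BY the parser treats bare CUBE and ROLLUP as grouping-set syntax, so functions with those names must be schema-qualified to remain callable there. A standalone sketch of the check:

#include <stdio.h>
#include <string.h>

static int
force_qualify_in_group_by(const char *proname, int in_group_by)
{
	return in_group_by &&
		   (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0);
}

int
main(void)
{
	printf("%d\n", force_qualify_in_group_by("cube", 1));	/* 1: emit schema.cube(...) */
	printf("%d\n", force_qualify_in_group_by("cube", 0));	/* 0: bare name is fine */
	printf("%d\n", force_qualify_in_group_by("lower", 1));	/* 0 */
	return 0;
}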
- */ - if (!force_qualify) - p_result = func_get_detail(list_make1(makeString(proname)), - NIL, argnames, nargs, argtypes, - !use_variadic, true, false, - &p_funcid, &p_rettype, - &p_retset, &p_nvargs, &p_vatype, - &p_true_typeids, NULL); - else - { - p_result = FUNCDETAIL_NOTFOUND; - p_funcid = InvalidOid; - } - - if ((p_result == FUNCDETAIL_NORMAL || - p_result == FUNCDETAIL_AGGREGATE || - p_result == FUNCDETAIL_WINDOWFUNC) && - p_funcid == funcid) - nspname = NULL; - else - nspname = get_namespace_name_or_temp(procform->pronamespace); - - result = quote_qualified_identifier(nspname, proname); - - ReleaseSysCache(proctup); - - return result; -} - -/* - * generate_operator_name - * Compute the name to display for an operator specified by OID, - * given that it is being called with the specified actual arg types. - * (Arg types matter because of ambiguous-operator resolution rules. - * Pass InvalidOid for unused arg of a unary operator.) - * - * The result includes all necessary quoting and schema-prefixing, - * plus the OPERATOR() decoration needed to use a qualified operator name - * in an expression. - */ -char * -generate_operator_name(Oid operid, Oid arg1, Oid arg2) -{ - StringInfoData buf; - HeapTuple opertup; - Form_pg_operator operform; - char *oprname; - char *nspname; - - initStringInfo(&buf); - - opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operid)); - if (!HeapTupleIsValid(opertup)) - elog(ERROR, "cache lookup failed for operator %u", operid); - operform = (Form_pg_operator) GETSTRUCT(opertup); - oprname = NameStr(operform->oprname); - - /* - * Unlike generate_operator_name() in postgres/src/backend/utils/adt/ruleutils.c, - * we don't check if the operator is in the current namespace or not. This is - * because this check is costly when the operator is not in the current namespace. - */ - nspname = get_namespace_name_or_temp(operform->oprnamespace); - Assert(nspname != NULL); - appendStringInfo(&buf, "OPERATOR(%s.", quote_identifier(nspname)); - appendStringInfoString(&buf, oprname); - appendStringInfoChar(&buf, ')'); - - ReleaseSysCache(opertup); - - return buf.data; -} - -/* - * get_range_partbound_string - * A C string representation of one range partition bound - */ -char * -get_range_partbound_string(List *bound_datums) -{ - deparse_context context; - StringInfo buf = makeStringInfo(); - ListCell *cell; - char *sep; - - memset(&context, 0, sizeof(deparse_context)); - context.buf = buf; - - appendStringInfoChar(buf, '('); - sep = ""; - foreach(cell, bound_datums) - { - PartitionRangeDatum *datum = - lfirst_node(PartitionRangeDatum, cell); - - appendStringInfoString(buf, sep); - if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE) - appendStringInfoString(buf, "MINVALUE"); - else if (datum->kind == PARTITION_RANGE_DATUM_MAXVALUE) - appendStringInfoString(buf, "MAXVALUE"); - else - { - Const *val = castNode(Const, datum->value); - - get_const_expr(val, &context, -1); - } - sep = ", "; - } - appendStringInfoChar(buf, ')'); - - return buf->data; -} - -/* - * Collect a list of OIDs of all sequences owned by the specified relation, - * and column if specified. If deptype is not zero, then only find sequences - * with the specified dependency type.
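get_range_partbound_string above renders each bound datum as MINVALUE, MAXVALUE, or the constant's text, comma-separated inside parentheses. A standalone sketch with strings standing in for Const nodes (illustrative only):

#include <stdio.h>

struct bound_datum
{
	int kind;			/* 0 = value, 1 = MINVALUE, 2 = MAXVALUE */
	const char *value;	/* used only when kind == 0 */
};

static void
render_range_bound(const struct bound_datum *datums, int n)
{
	const char *sep = "";

	printf("(");
	for (int i = 0; i < n; i++)
	{
		printf("%s", sep);
		if (datums[i].kind == 1)
			printf("MINVALUE");
		else if (datums[i].kind == 2)
			printf("MAXVALUE");
		else
			printf("%s", datums[i].value);
		sep = ", ";
	}
	printf(")\n");
}

int
main(void)
{
	struct bound_datum lower[] = { { 0, "10" }, { 1, NULL } };

	render_range_bound(lower, 2);	/* (10, MINVALUE) */
	return 0;
}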
- */ -List * -getOwnedSequences_internal(Oid relid, AttrNumber attnum, char deptype) -{ - List *result = NIL; - Relation depRel; - ScanKeyData key[3]; - SysScanDesc scan; - HeapTuple tup; - - depRel = table_open(DependRelationId, AccessShareLock); - - ScanKeyInit(&key[0], - Anum_pg_depend_refclassid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit(&key[1], - Anum_pg_depend_refobjid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(relid)); - if (attnum) - ScanKeyInit(&key[2], - Anum_pg_depend_refobjsubid, - BTEqualStrategyNumber, F_INT4EQ, - Int32GetDatum(attnum)); - - scan = systable_beginscan(depRel, DependReferenceIndexId, true, - NULL, attnum ? 3 : 2, key); - - while (HeapTupleIsValid(tup = systable_getnext(scan))) - { - Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup); - - /* - * We assume any auto or internal dependency of a sequence on a column - * must be what we are looking for. (We need the relkind test because - * indexes can also have auto dependencies on columns.) - */ - if (deprec->classid == RelationRelationId && - deprec->objsubid == 0 && - deprec->refobjsubid != 0 && - (deprec->deptype == DEPENDENCY_AUTO || deprec->deptype == DEPENDENCY_INTERNAL) && - get_rel_relkind(deprec->objid) == RELKIND_SEQUENCE) - { - if (!deptype || deprec->deptype == deptype) - result = lappend_oid(result, deprec->objid); - } - } - - systable_endscan(scan); - - table_close(depRel, AccessShareLock); - - return result; -} - -/* - * get_insert_column_names_list - Prepares the insert-column-names list. Any indirection - * decoration needed on the column names can be inferred from the top targetlist. - */ -static List * -get_insert_column_names_list(List *targetList, StringInfo buf, - deparse_context *context, RangeTblEntry *rte) -{ - char *sep; - ListCell *l; - List *strippedexprs; - - strippedexprs = NIL; - sep = ""; - appendStringInfoChar(buf, '('); - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - - if (tle->resjunk) - continue; /* ignore junk entries */ - - appendStringInfoString(buf, sep); - sep = ", "; - - /* - * Put out name of target column; look in the catalogs, not at - * tle->resname, since resname will fail to track RENAME. - */ - appendStringInfoString(buf, - quote_identifier(get_attname(rte->relid, - tle->resno, - false))); - - /* - * Print any indirection needed (subfields or subscripts), and strip - * off the top-level nodes representing the indirection assignments. - * Add the stripped expressions to strippedexprs. (If it's a - * single-VALUES statement, the stripped expressions are the VALUES to - * print below. Otherwise they're just Vars and not really - * interesting.)
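The pg_depend filter above keeps only auto and internal dependencies whose dependent object is a sequence, optionally narrowed to one dependency type. A standalone sketch of the predicate over simplified rows (field names are stand-ins for pg_depend, not the real catalog struct):

#include <stdio.h>

struct dep_row
{
	unsigned oid;		/* dependent object */
	char deptype;		/* 'a' = auto, 'i' = internal, 'n' = normal */
	int is_sequence;	/* get_rel_relkind(...) == RELKIND_SEQUENCE */
};

static int
keep_row(const struct dep_row *row, char wanted_deptype)
{
	if (!(row->deptype == 'a' || row->deptype == 'i') || !row->is_sequence)
		return 0;
	return wanted_deptype == 0 || row->deptype == wanted_deptype;
}

int
main(void)
{
	struct dep_row rows[] = {
		{ 16384, 'a', 1 },	/* serial column's sequence: kept */
		{ 16385, 'i', 1 },	/* identity column's sequence: kept */
		{ 16386, 'a', 0 },	/* index on the column: skipped */
	};

	for (int i = 0; i < 3; i++)
		printf("oid %u: %s\n", rows[i].oid,
			   keep_row(&rows[i], 0) ? "kept" : "skipped");
	return 0;
}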
- */ - strippedexprs = lappend(strippedexprs, - processIndirection((Node *) tle->expr, - context)); - } - appendStringInfoString(buf, ") "); - - return strippedexprs; -} -#endif /* (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16) */ diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 9694b85bf..87463788f 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -88,10 +88,6 @@ #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" -#if PG_VERSION_NUM < PG_VERSION_16 -#include "utils/relfilenodemap.h" -#endif - /* user configuration */ int ReadFromSecondaries = USE_SECONDARY_NODES_NEVER; diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 391444856..7de68af5d 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -3195,7 +3195,7 @@ SignalMetadataSyncDaemon(Oid database, int sig) int backendCount = pgstat_fetch_stat_numbackends(); for (int backend = 1; backend <= backendCount; backend++) { - LocalPgBackendStatus *localBeEntry = pgstat_fetch_stat_local_beentry(backend); + LocalPgBackendStatus *localBeEntry = pgstat_get_local_beentry_by_index(backend); if (!localBeEntry) { continue; diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index 1fb3d6fd0..6c819b142 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -29,6 +29,7 @@ #include "catalog/pg_constraint.h" #include "catalog/pg_extension.h" #include "catalog/pg_namespace.h" +#include "catalog/pg_proc_d.h" #include "catalog/pg_type.h" #include "commands/extension.h" #include "commands/sequence.h" @@ -81,10 +82,6 @@ #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" -#if PG_VERSION_NUM >= PG_VERSION_16 -#include "catalog/pg_proc_d.h" -#endif - #define DISK_SPACE_FIELDS 2 /* Local functions forward declarations */ diff --git a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c index bd9b84e81..5e54b0bf5 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c +++ b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c @@ -96,7 +96,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("name or argument lists may not contain nulls"))); } - typename = typeStringToTypeName_compat(TextDatumGetCString(elems[0]), NULL); + typename = typeStringToTypeName(TextDatumGetCString(elems[0]), NULL); } else if (type == OBJECT_LARGEOBJECT) { @@ -163,7 +163,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr) errmsg("name or argument lists may not contain nulls"))); } args = lappend(args, - typeStringToTypeName_compat(TextDatumGetCString(elems[i]), + typeStringToTypeName(TextDatumGetCString(elems[i]), NULL)); } } diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index 899bd7b54..5d1a5829d 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -2476,7 +2476,7 @@ GetSetCommandListForNewConnections(void) List *commandList = NIL; int 
gucCount = 0; - struct config_generic **guc_vars = get_guc_variables_compat(&gucCount); + struct config_generic **guc_vars = get_guc_variables(&gucCount); for (int gucIndex = 0; gucIndex < gucCount; gucIndex++) { diff --git a/src/backend/distributed/planner/deparse_shard_query.c b/src/backend/distributed/planner/deparse_shard_query.c index b22bb8028..4b3d3664e 100644 --- a/src/backend/distributed/planner/deparse_shard_query.c +++ b/src/backend/distributed/planner/deparse_shard_query.c @@ -610,11 +610,10 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte) subquery->jointree = joinTree; rte->rtekind = RTE_SUBQUERY; -#if PG_VERSION_NUM >= PG_VERSION_16 /* no permission checking for this RTE */ rte->perminfoindex = 0; -#endif + rte->subquery = subquery; rte->alias = copyObject(rte->eref); } diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 2036a4378..c754e2bc0 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -29,6 +29,7 @@ #include "optimizer/plancat.h" #include "optimizer/planmain.h" #include "optimizer/planner.h" +#include "parser/parse_relation.h" #include "parser/parse_type.h" #include "parser/parsetree.h" #include "utils/builtins.h" @@ -71,10 +72,6 @@ #include "distributed/version_compat.h" #include "distributed/worker_shard_visibility.h" -#if PG_VERSION_NUM >= PG_VERSION_16 -#include "parser/parse_relation.h" -#endif - static List *plannerRestrictionContextList = NIL; int MultiTaskQueryLogLevel = CITUS_LOG_LEVEL_OFF; /* multi-task query log level */ @@ -1510,7 +1507,6 @@ static void ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan, PlannedStmt *concatPlan) { mainPlan->rtable = list_concat(mainPlan->rtable, concatPlan->rtable); -#if PG_VERSION_NUM >= PG_VERSION_16 /* * concatPlan's range table list is concatenated to mainPlan's range table list @@ -1532,7 +1528,6 @@ ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan, PlannedStmt *concatPlan) /* finally, concatenate perminfos as well */ mainPlan->permInfos = list_concat(mainPlan->permInfos, concatPlan->permInfos); -#endif } @@ -2018,18 +2013,6 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo, { cacheEntry = GetCitusTableCacheEntry(rte->relid); -#if PG_VERSION_NUM == PG_VERSION_15 - - /* - * Postgres 15.0 had a bug regarding inherited statistics expressions, - * which is fixed in 15.1 via Postgres commit - * 1f1865e9083625239769c26f68b9c2861b8d4b1c. 
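One subtlety in the ConcatenateRTablesAndPerminfos hunk above is worth spelling out: rte->perminfoindex is a 1-based index into PlannedStmt->permInfos, so once the two range tables are concatenated, every nonzero index coming from concatPlan must be shifted by the number of permInfos mainPlan already holds. A sketch of that shift, with variable and function names ours:

#include "postgres.h"

#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"

/* Sketch: offset the perminfoindex of the appended RTEs; zero means "no
 * permission checks for this RTE" and must stay zero. */
static void
ShiftAppendedPerminfoIndexes(List *appendedRtable, int mainPermInfoCount)
{
	ListCell *lc;

	foreach(lc, appendedRtable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);

		if (rte->perminfoindex > 0)
		{
			rte->perminfoindex += mainPermInfoCount;
		}
	}
}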
- * - * Hence, we only set this value on exactly PG15.0 - */ - relOptInfo->statlist = NIL; -#endif - relationRestrictionContext->allReferenceTables &= IsCitusTableTypeCacheEntry(cacheEntry, REFERENCE_TABLE); } diff --git a/src/backend/distributed/planner/fast_path_router_planner.c b/src/backend/distributed/planner/fast_path_router_planner.c index 80afc7afa..63c68f03a 100644 --- a/src/backend/distributed/planner/fast_path_router_planner.c +++ b/src/backend/distributed/planner/fast_path_router_planner.c @@ -116,24 +116,15 @@ PlannedStmt * GeneratePlaceHolderPlannedStmt(Query *parse) { PlannedStmt *result = makeNode(PlannedStmt); -#if PG_VERSION_NUM >= PG_VERSION_16 SeqScan *scanNode = makeNode(SeqScan); Plan *plan = &(scanNode->scan.plan); -#else - Scan *scanNode = makeNode(Scan); - Plan *plan = &scanNode->plan; -#endif FastPathRestrictionContext fprCtxt PG_USED_FOR_ASSERTS_ONLY = { 0 }; Assert(FastPathRouterQuery(parse, &fprCtxt)); /* there is only a single relation rte */ -#if PG_VERSION_NUM >= PG_VERSION_16 scanNode->scan.scanrelid = 1; -#else - scanNode->scanrelid = 1; -#endif plan->targetlist = copyObject(FetchStatementTargetList((Node *) parse)); @@ -149,9 +140,7 @@ GeneratePlaceHolderPlannedStmt(Query *parse) result->stmt_len = parse->stmt_len; result->rtable = copyObject(parse->rtable); -#if PG_VERSION_NUM >= PG_VERSION_16 result->permInfos = copyObject(parse->rteperminfos); -#endif result->planTree = (Plan *) plan; result->hasReturning = (parse->returningList != NIL); diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 554ac631e..e123d649a 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -623,8 +623,6 @@ CreateCombineQueryForRouterPlan(DistributedPlan *distPlan) combineQuery->canSetTag = true; combineQuery->rtable = list_make1(rangeTableEntry); -#if PG_VERSION_NUM >= PG_VERSION_16 - /* * This part of the code is more of a sanity check for readability, * it doesn't really do anything. 
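With PG15 gone, the GeneratePlaceHolderPlannedStmt hunk above keeps only the PG16+ node shape: the placeholder is a SeqScan whose Plan is reached through scan.plan, rather than a bare Scan node. As a compile-level sketch (function name ours):

#include "postgres.h"

#include "nodes/plannodes.h"

/* Sketch: build the trivial one-relation scan used as a placeholder plan;
 * scanrelid 1 points at the only range table entry. */
static Plan *
MakePlaceholderSeqScan(List *targetList)
{
	SeqScan *scanNode = makeNode(SeqScan);
	Plan *plan = &(scanNode->scan.plan);

	scanNode->scan.scanrelid = 1;
	plan->targetlist = targetList;

	return plan;
}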
@@ -636,7 +634,6 @@ CreateCombineQueryForRouterPlan(DistributedPlan *distPlan) Assert(rangeTableEntry->rtekind == RTE_FUNCTION && rangeTableEntry->perminfoindex == 0); combineQuery->rteperminfos = NIL; -#endif combineQuery->targetList = targetList; combineQuery->jointree = joinTree; @@ -1599,13 +1596,10 @@ WrapSubquery(Query *subquery) outerQuery->rtable = list_make1(rte_subq); -#if PG_VERSION_NUM >= PG_VERSION_16 - /* Ensure RTE_SUBQUERY has proper permission handling */ Assert(rte_subq->rtekind == RTE_SUBQUERY && rte_subq->perminfoindex == 0); outerQuery->rteperminfos = NIL; -#endif RangeTblRef *rtref = makeNode(RangeTblRef); rtref->rtindex = 1; /* Only one RTE, so index is 1 */ diff --git a/src/backend/distributed/planner/local_distributed_join_planner.c b/src/backend/distributed/planner/local_distributed_join_planner.c index 2760377bb..7b44a9c21 100644 --- a/src/backend/distributed/planner/local_distributed_join_planner.c +++ b/src/backend/distributed/planner/local_distributed_join_planner.c @@ -135,9 +135,7 @@ typedef struct RangeTableEntryDetails RangeTblEntry *rangeTableEntry; List *requiredAttributeNumbers; bool hasConstantFilterOnUniqueColumn; -#if PG_VERSION_NUM >= PG_VERSION_16 RTEPermissionInfo *perminfo; -#endif } RangeTableEntryDetails; /* @@ -208,17 +206,11 @@ RecursivelyPlanLocalTableJoins(Query *query, GetPlannerRestrictionContext(context); List *rangeTableList = query->rtable; -#if PG_VERSION_NUM >= PG_VERSION_16 List *rteperminfos = query->rteperminfos; -#endif int resultRTEIdentity = ResultRTEIdentity(query); ConversionCandidates *conversionCandidates = CreateConversionCandidates(plannerRestrictionContext, -#if PG_VERSION_NUM >= PG_VERSION_16 rangeTableList, resultRTEIdentity, rteperminfos); -#else - rangeTableList, resultRTEIdentity, NIL); -#endif ConversionChoice conversionChoise = GetConversionChoice(conversionCandidates, plannerRestrictionContext); @@ -333,12 +325,8 @@ ConvertRTEsToSubquery(List *rangeTableEntryDetailsList, RecursivePlanningContext RangeTblEntry *rangeTableEntry = rangeTableEntryDetails->rangeTableEntry; List *requiredAttributeNumbers = rangeTableEntryDetails->requiredAttributeNumbers; ReplaceRTERelationWithRteSubquery(rangeTableEntry, -#if PG_VERSION_NUM >= PG_VERSION_16 requiredAttributeNumbers, context, rangeTableEntryDetails->perminfo); -#else - requiredAttributeNumbers, context, NULL); -#endif } } @@ -581,14 +569,12 @@ CreateConversionCandidates(PlannerRestrictionContext *plannerRestrictionContext, RequiredAttrNumbersForRelation(rangeTableEntry, plannerRestrictionContext); rangeTableEntryDetails->hasConstantFilterOnUniqueColumn = HasConstantFilterOnUniqueColumn(rangeTableEntry, relationRestriction); -#if PG_VERSION_NUM >= PG_VERSION_16 rangeTableEntryDetails->perminfo = NULL; if (rangeTableEntry->perminfoindex) { rangeTableEntryDetails->perminfo = getRTEPermissionInfo(rteperminfos, rangeTableEntry); } -#endif bool referenceOrDistributedTable = IsCitusTableType(rangeTableEntry->relid, REFERENCE_TABLE) || diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index c456fa341..52c726c87 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -835,11 +835,9 @@ ConvertCteRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte) Query *cteQuery = (Query *) copyObject(sourceCte->ctequery); sourceRte->rtekind = RTE_SUBQUERY; -#if PG_VERSION_NUM >= PG_VERSION_16 /* sanity check - sourceRte was RTE_CTE previously so it should have no 
perminfo */ Assert(sourceRte->perminfoindex == 0); -#endif /* * As we are delinking the CTE from main query, we have to walk through the @@ -889,8 +887,6 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte, /* we copy the input rteRelation to preserve the rteIdentity */ RangeTblEntry *newRangeTableEntry = copyObject(sourceRte); sourceResultsQuery->rtable = list_make1(newRangeTableEntry); - -#if PG_VERSION_NUM >= PG_VERSION_16 sourceResultsQuery->rteperminfos = NIL; if (sourceRte->perminfoindex) { @@ -902,7 +898,6 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte, newRangeTableEntry->perminfoindex = 1; sourceResultsQuery->rteperminfos = list_make1(perminfo); } -#endif /* set the FROM expression to the subquery */ newRangeTableRef->rtindex = SINGLE_RTE_INDEX; @@ -929,9 +924,7 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte, /* replace the function with the constructed subquery */ sourceRte->rtekind = RTE_SUBQUERY; -#if PG_VERSION_NUM >= PG_VERSION_16 sourceRte->perminfoindex = 0; -#endif sourceRte->subquery = sourceResultsQuery; sourceRte->inh = false; } diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index d11aae02f..52e56030e 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -287,13 +287,11 @@ PG_FUNCTION_INFO_V1(worker_save_query_explain_analyze); void CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es) { -#if PG_VERSION_NUM >= PG_VERSION_16 if (es->generic) { ereport(ERROR, (errmsg( "EXPLAIN GENERIC_PLAN is currently not supported for Citus tables"))); } -#endif CitusScanState *scanState = (CitusScanState *) node; DistributedPlan *distributedPlan = scanState->distributedPlan; diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index 14ce199c8..8197a7047 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -2259,13 +2259,10 @@ ConvertToQueryOnShard(Query *query, Oid citusTableOid, Oid shardId) Assert(shardRelationId != InvalidOid); citusTableRte->relid = shardRelationId; -#if PG_VERSION_NUM >= PG_VERSION_16 - /* Change the range table permission oid to that of the shard's (PG16+) */ Assert(list_length(query->rteperminfos) == 1); RTEPermissionInfo *rtePermInfo = (RTEPermissionInfo *) linitial(query->rteperminfos); rtePermInfo->relid = shardRelationId; -#endif return true; } @@ -2573,18 +2570,6 @@ SelectsFromDistributedTable(List *rangeTableList, Query *query) continue; } -#if PG_VERSION_NUM >= 150013 && PG_VERSION_NUM < PG_VERSION_16 - if (rangeTableEntry->rtekind == RTE_SUBQUERY && rangeTableEntry->relkind == 0) - { - /* - * In PG15.13 commit https://github.com/postgres/postgres/commit/317aba70e - * relid is retained when converting views to subqueries, - * so we need an extra check identifying those views - */ - continue; - } -#endif - if (rangeTableEntry->relkind == RELKIND_VIEW || rangeTableEntry->relkind == RELKIND_MATVIEW) { diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c index d298b0f46..98dd0146a 100644 --- a/src/backend/distributed/planner/query_colocation_checker.c +++ b/src/backend/distributed/planner/query_colocation_checker.c @@ -81,16 +81,12 @@ CreateColocatedJoinChecker(Query *subquery, 
PlannerRestrictionContext *restricti * functions (i.e., FilterPlannerRestrictionForQuery()) rely on queries * not relations. */ -#if PG_VERSION_NUM >= PG_VERSION_16 RTEPermissionInfo *perminfo = NULL; if (anchorRangeTblEntry->perminfoindex) { perminfo = getRTEPermissionInfo(subquery->rteperminfos, anchorRangeTblEntry); } anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, perminfo); -#else - anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, NULL); -#endif } else if (anchorRangeTblEntry->rtekind == RTE_SUBQUERY) { @@ -133,7 +129,7 @@ static RangeTblEntry * AnchorRte(Query *subquery) { FromExpr *joinTree = subquery->jointree; - Relids joinRelIds = get_relids_in_jointree_compat((Node *) joinTree, false, false); + Relids joinRelIds = get_relids_in_jointree((Node *) joinTree, false, false); int currentRTEIndex = -1; RangeTblEntry *anchorRangeTblEntry = NULL; @@ -286,13 +282,11 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation, RangeTblEntry *newRangeTableEntry = copyObject(rteRelation); subquery->rtable = list_make1(newRangeTableEntry); -#if PG_VERSION_NUM >= PG_VERSION_16 if (perminfo) { newRangeTableEntry->perminfoindex = 1; subquery->rteperminfos = list_make1(perminfo); } -#endif /* set the FROM expression to the subquery */ newRangeTableRef = makeNode(RangeTblRef); diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c index b94412f2b..1d6a87543 100644 --- a/src/backend/distributed/planner/query_pushdown_planning.c +++ b/src/backend/distributed/planner/query_pushdown_planning.c @@ -2054,9 +2054,7 @@ SubqueryPushdownMultiNodeTree(Query *originalQuery) pushedDownQuery->targetList = subqueryTargetEntryList; pushedDownQuery->jointree = copyObject(queryTree->jointree); pushedDownQuery->rtable = copyObject(queryTree->rtable); -#if PG_VERSION_NUM >= PG_VERSION_16 pushedDownQuery->rteperminfos = copyObject(queryTree->rteperminfos); -#endif pushedDownQuery->setOperations = copyObject(queryTree->setOperations); pushedDownQuery->querySource = queryTree->querySource; pushedDownQuery->hasSubLinks = queryTree->hasSubLinks; @@ -2190,9 +2188,7 @@ CreateSubqueryTargetListAndAdjustVars(List *columnList) * the var - is empty. Otherwise, when given the query, the Postgres planner * may attempt to access a non-existent range table and segfault, as in #7787. 
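The fix for #7787 described above survives the cleanup in unconditional form. The invariant it enforces, as a minimal sketch (helper name ours):

#include "postgres.h"

#include "nodes/primnodes.h"

/* Sketch: a Var copied into a freshly built subquery target list must not
 * keep varnullingrels, which reference outer joins of the original query;
 * leaving them set can send the planner after a non-existent range table. */
static void
ClearVarNullingRels(Var *column)
{
	column->varnullingrels = NULL;
}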
*/ -#if PG_VERSION_NUM >= PG_VERSION_16 column->varnullingrels = NULL; -#endif } return subqueryTargetEntryList; diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index 139b30231..2df9038c6 100644 --- a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -973,7 +973,6 @@ RecursivelyPlanDistributedJoinNode(Node *node, Query *query, List *requiredAttributes = RequiredAttrNumbersForRelation(distributedRte, restrictionContext); -#if PG_VERSION_NUM >= PG_VERSION_16 RTEPermissionInfo *perminfo = NULL; if (distributedRte->perminfoindex) { @@ -982,10 +981,6 @@ RecursivelyPlanDistributedJoinNode(Node *node, Query *query, ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes, recursivePlanningContext, perminfo); -#else - ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes, - recursivePlanningContext, NULL); -#endif } else if (distributedRte->rtekind == RTE_SUBQUERY) { @@ -1874,9 +1869,7 @@ ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry, /* replace the function with the constructed subquery */ rangeTableEntry->rtekind = RTE_SUBQUERY; -#if PG_VERSION_NUM >= PG_VERSION_16 rangeTableEntry->perminfoindex = 0; -#endif rangeTableEntry->subquery = subquery; /* @@ -1949,13 +1942,10 @@ CreateOuterSubquery(RangeTblEntry *rangeTableEntry, List *outerSubqueryTargetLis innerSubqueryRTE->eref->colnames = innerSubqueryColNames; outerSubquery->rtable = list_make1(innerSubqueryRTE); -#if PG_VERSION_NUM >= PG_VERSION_16 - /* sanity check */ Assert(innerSubqueryRTE->rtekind == RTE_SUBQUERY && innerSubqueryRTE->perminfoindex == 0); outerSubquery->rteperminfos = NIL; -#endif /* set the FROM expression to the subquery */ @@ -2131,13 +2121,10 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry) /* set the FROM expression to the subquery */ subquery->rtable = list_make1(newRangeTableEntry); -#if PG_VERSION_NUM >= PG_VERSION_16 - /* sanity check */ Assert(newRangeTableEntry->rtekind == RTE_FUNCTION && newRangeTableEntry->perminfoindex == 0); subquery->rteperminfos = NIL; -#endif newRangeTableRef->rtindex = 1; subquery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL); @@ -2459,9 +2446,7 @@ BuildEmptyResultQuery(List *targetEntryList, char *resultId) valuesQuery->canSetTag = true; valuesQuery->commandType = CMD_SELECT; valuesQuery->rtable = list_make1(valuesRangeTable); - #if PG_VERSION_NUM >= PG_VERSION_16 valuesQuery->rteperminfos = NIL; - #endif valuesQuery->jointree = valuesJoinTree; valuesQuery->targetList = valueTargetList; @@ -2478,9 +2463,7 @@ BuildEmptyResultQuery(List *targetEntryList, char *resultId) resultQuery->commandType = CMD_SELECT; resultQuery->canSetTag = true; resultQuery->rtable = list_make1(emptyRangeTable); -#if PG_VERSION_NUM >= PG_VERSION_16 resultQuery->rteperminfos = NIL; -#endif RangeTblRef *rangeTableRef = makeNode(RangeTblRef); rangeTableRef->rtindex = 1; @@ -2630,9 +2613,7 @@ BuildReadIntermediateResultsQuery(List *targetEntryList, List *columnAliasList, Query *resultQuery = makeNode(Query); resultQuery->commandType = CMD_SELECT; resultQuery->rtable = list_make1(rangeTableEntry); -#if PG_VERSION_NUM >= PG_VERSION_16 resultQuery->rteperminfos = NIL; -#endif resultQuery->jointree = joinTree; resultQuery->targetList = targetList; diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index 5a63503f0..f38b9f26b 
100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -1508,7 +1508,6 @@ GetTargetSubquery(PlannerInfo *root, RangeTblEntry *rangeTableEntry, Var *varToB bool IsRelOptOuterJoin(PlannerInfo *root, int varNo) { -#if PG_VERSION_NUM >= PG_VERSION_16 if (root->simple_rel_array_size <= varNo) { return true; @@ -1520,7 +1519,6 @@ IsRelOptOuterJoin(PlannerInfo *root, int varNo) /* must be an outer join */ return true; } -#endif return false; } diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index 4c43d3513..9868fefd3 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -1515,8 +1515,6 @@ CreateSubscriptions(MultiConnection *sourceConnection, appendStringInfo(createSubscriptionCommand, "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s " "WITH (citus_use_authinfo=true, create_slot=false, " -#if PG_VERSION_NUM >= PG_VERSION_16 - /* * password_required specifies whether connections to the publisher * made as a result of this subscription must use password authentication. @@ -1529,9 +1527,6 @@ CreateSubscriptions(MultiConnection *sourceConnection, * it will be ignored anyway */ "copy_data=false, enabled=false, slot_name=%s, password_required=false", -#else - "copy_data=false, enabled=false, slot_name=%s", -#endif quote_identifier(target->subscriptionName), quote_literal_cstr(conninfo->data), quote_identifier(target->publication->name), diff --git a/src/backend/distributed/shardsplit/shardsplit_decoder.c b/src/backend/distributed/shardsplit/shardsplit_decoder.c index 837009530..bcd25ce2e 100644 --- a/src/backend/distributed/shardsplit/shardsplit_decoder.c +++ b/src/backend/distributed/shardsplit/shardsplit_decoder.c @@ -94,42 +94,6 @@ replication_origin_filter_cb(LogicalDecodingContext *ctx, RepOriginId origin_id) } -/* - * update_replication_progress is copied from Postgres 15. We use it to send keepalive - * messages when we are filtering out the wal changes resulting from the initial copy. - * If we do not send out messages long enough, wal reciever will time out. - * Postgres 16 has refactored this code such that keepalive messages are sent during - * reordering phase which is above change_cb. So we do not need to send keepalive in - * change_cb. - */ -#if (PG_VERSION_NUM < PG_VERSION_16) -static void -update_replication_progress(LogicalDecodingContext *ctx, bool skipped_xact) -{ - static int changes_count = 0; - - /* - * We don't want to try sending a keepalive message after processing each - * change as that can have overhead. Tests revealed that there is no - * noticeable overhead in doing it after continuously processing 100 or so - * changes. - */ -#define CHANGES_THRESHOLD 100 - - /* - * After continuously processing CHANGES_THRESHOLD changes, we - * try to send a keepalive message if required. - */ - if (ctx->end_xact || ++changes_count >= CHANGES_THRESHOLD) - { - OutputPluginUpdateProgress(ctx, skipped_xact); - changes_count = 0; - } -} - - -#endif - /* * shard_split_change_cb function emits the incoming tuple change * to the appropriate destination shard. @@ -148,12 +112,6 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, return; } -#if (PG_VERSION_NUM < PG_VERSION_16) - - /* Send replication keepalive. 
*/ - update_replication_progress(ctx, false); -#endif - - /* check if the relation is publishable.*/ if (!is_publishable_relation(relation)) {
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index ac8b0a7d6..08cbd1099 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -2803,7 +2803,7 @@ static void OverridePostgresConfigProperties(void) { int gucCount = 0; - struct config_generic **guc_vars = get_guc_variables_compat(&gucCount); + struct config_generic **guc_vars = get_guc_variables(&gucCount); for (int gucIndex = 0; gucIndex < gucCount; gucIndex++) { @@ -2982,7 +2982,7 @@ ShowShardsForAppNamePrefixesCheckHook(char **newval, void **extra, GucSource sou } char *prefixAscii = pstrdup(appNamePrefix); - pg_clean_ascii_compat(prefixAscii, 0); + pg_clean_ascii(prefixAscii, 0); if (strcmp(prefixAscii, appNamePrefix) != 0) {
diff --git a/src/backend/distributed/test/fake_am.c b/src/backend/distributed/test/fake_am.c index 75b7a9fb2..bd954053f 100644 --- a/src/backend/distributed/test/fake_am.c +++ b/src/backend/distributed/test/fake_am.c @@ -562,11 +562,7 @@ static const TableAmRoutine fake_methods = { .tuple_satisfies_snapshot = fake_tuple_satisfies_snapshot, .index_delete_tuples = fake_index_delete_tuples, -#if PG_VERSION_NUM >= PG_VERSION_16 .relation_set_new_filelocator = fake_relation_set_new_filenode, -#else - .relation_set_new_filenode = fake_relation_set_new_filenode, -#endif .relation_nontransactional_truncate = fake_relation_nontransactional_truncate, .relation_copy_data = fake_copy_data, .relation_copy_for_cluster = fake_copy_for_cluster,
diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c index 11982ec5a..18422847f 100644 --- a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -740,8 +740,6 @@ UnlockLockData(void) * We have separate blocks for PG16 and <PG16 */ -#if PG_VERSION_NUM >= PG_VERSION_16 static void AddEdgesForLockWaits(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining) { @@ -820,86 +818,6 @@ AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remai } -#else - -static void -AddEdgesForLockWaits(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining) -{ - /* the lock for which this process is waiting */ - LOCK *waitLock = waitingProc->waitLock; - - /* determine the conflict mask for the lock level used by the process */ - LockMethod lockMethodTable = GetLocksMethodTable(waitLock); - int conflictMask = lockMethodTable->conflictTab[waitingProc->waitLockMode]; - - /* iterate through the queue of processes holding the lock */ - SHM_QUEUE *procLocks = &waitLock->procLocks; - PROCLOCK *procLock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks, - offsetof(PROCLOCK, lockLink)); - - while (procLock != NULL) - { - PGPROC *currentProc = procLock->tag.myProc; - - /* - * Skip processes from the same lock group, processes that don't conflict, - * and processes that are waiting on safe operations.
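For orientation, the PG16-only implementations retained above the removed #else walk the same structures through the dlist API that replaced SHM_QUEUE in PG16. Roughly, as a sketch rather than the exact Citus body:

#include "postgres.h"

#include "lib/ilist.h"
#include "storage/lock.h"
#include "storage/proc.h"

/* Sketch: on PG16+, LOCK.procLocks is a dlist_head, so the holders of a
 * lock are visited with dlist_foreach instead of SHMQueueNext. */
static void
VisitLockHolders(LOCK *waitLock)
{
	dlist_iter iter;

	dlist_foreach(iter, &waitLock->procLocks)
	{
		PROCLOCK *procLock = dlist_container(PROCLOCK, lockLink, iter.cur);
		PGPROC *currentProc = procLock->tag.myProc;

		(void) currentProc;     /* lock-group and conflict checks elided */
	}
}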
- */ - if (!IsSameLockGroup(waitingProc, currentProc) && - IsConflictingLockMask(procLock->holdMask, conflictMask) && - !IsProcessWaitingForSafeOperations(currentProc)) - { - AddWaitEdge(waitGraph, waitingProc, currentProc, remaining); - } - - procLock = (PROCLOCK *) SHMQueueNext(procLocks, &procLock->lockLink, - offsetof(PROCLOCK, lockLink)); - } -} - - -static void -AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining) -{ - /* the lock for which this process is waiting */ - LOCK *waitLock = waitingProc->waitLock; - - /* determine the conflict mask for the lock level used by the process */ - LockMethod lockMethodTable = GetLocksMethodTable(waitLock); - int conflictMask = lockMethodTable->conflictTab[waitingProc->waitLockMode]; - - /* iterate through the wait queue */ - PROC_QUEUE *waitQueue = &(waitLock->waitProcs); - int queueSize = waitQueue->size; - PGPROC *currentProc = (PGPROC *) waitQueue->links.next; - - /* - * Iterate through the queue from the start until we encounter waitingProc, - * since we only care about processes in front of waitingProc in the queue. - */ - while (queueSize-- > 0 && currentProc != waitingProc) - { - int awaitMask = LOCKBIT_ON(currentProc->waitLockMode); - - /* - * Skip processes from the same lock group, processes that don't conflict, - * and processes that are waiting on safe operations. - */ - if (!IsSameLockGroup(waitingProc, currentProc) && - IsConflictingLockMask(awaitMask, conflictMask) && - !IsProcessWaitingForSafeOperations(currentProc)) - { - AddWaitEdge(waitGraph, waitingProc, currentProc, remaining); - } - - currentProc = (PGPROC *) currentProc->links.next; - } -} - - -#endif - - /* * AddWaitEdge adds a new wait edge to a wait graph. The nodes in the graph are * transactions and an edge indicates the "waiting" process is blocked on a lock diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 16c5c56fa..bca0663a8 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -25,6 +25,7 @@ #include "storage/fd.h" #include "utils/datum.h" #include "utils/guc.h" +#include "utils/guc_tables.h" #include "utils/hsearch.h" #include "utils/memutils.h" @@ -807,13 +808,9 @@ AdjustMaxPreparedTransactions(void) * really check if max_prepared_xacts is configured by the user explicitly, * so check if it's value is default. 
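The check that follows generalizes to any setting: with utils/guc_tables.h included, a GUC's origin can be inspected directly instead of being inferred from a magic default value. A sketch (function name ours, GUC name an example):

#include "postgres.h"

#include "utils/guc.h"
#include "utils/guc_tables.h"

/* Sketch: true when the named GUC has never been set by the user, i.e. its
 * source is still the built-in default. */
static bool
GucIsAtDefault(const char *name)
{
	struct config_generic *gconf = find_option(name, false, false, ERROR);

	return gconf->source == PGC_S_DEFAULT;
}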
*/ -#if PG_VERSION_NUM >= PG_VERSION_16 struct config_generic *gconf = find_option("max_prepared_transactions", false, false, ERROR); if (gconf->source == PGC_S_DEFAULT) -#else - if (max_prepared_xacts == 0) -#endif { char newvalue[12]; diff --git a/src/backend/distributed/utils/function_utils.c b/src/backend/distributed/utils/function_utils.c index 0770b8cb9..f5c2f82e9 100644 --- a/src/backend/distributed/utils/function_utils.c +++ b/src/backend/distributed/utils/function_utils.c @@ -42,8 +42,7 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume bool missingOK) { char *qualifiedFunctionName = quote_qualified_identifier(schemaName, functionName); - List *qualifiedFunctionNameList = stringToQualifiedNameList_compat( - qualifiedFunctionName); + List *qualifiedFunctionNameList = stringToQualifiedNameList(qualifiedFunctionName, NULL); List *argumentList = NIL; const bool findVariadics = false; const bool findDefaults = false; diff --git a/src/backend/distributed/utils/relation_utils.c b/src/backend/distributed/utils/relation_utils.c index d39c1f071..f5e2c629a 100644 --- a/src/backend/distributed/utils/relation_utils.c +++ b/src/backend/distributed/utils/relation_utils.c @@ -14,9 +14,7 @@ #include "distributed/relation_utils.h" -#if PG_VERSION_NUM >= PG_VERSION_16 #include "miscadmin.h" -#endif #include "utils/lsyscache.h" #include "utils/rel.h" @@ -33,8 +31,6 @@ RelationGetNamespaceName(Relation relation) } -#if PG_VERSION_NUM >= PG_VERSION_16 - /* * GetFilledPermissionInfo creates RTEPermissionInfo for a given RTE * and fills it with given data and returns this RTEPermissionInfo object. @@ -56,6 +52,3 @@ GetFilledPermissionInfo(Oid relid, bool inh, AclMode requiredPerms) perminfo->checkAsUser = GetUserId(); return perminfo; } - - -#endif diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index 451649969..64824dd9d 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -526,8 +526,7 @@ CreateRenameTypeStmt(const ObjectAddress *address, char *newName) RenameStmt *stmt = makeNode(RenameStmt); stmt->renameType = OBJECT_TYPE; - stmt->object = (Node *) stringToQualifiedNameList_compat(format_type_be_qualified( - address->objectId)); + stmt->object = (Node *) stringToQualifiedNameList(format_type_be_qualified(address->objectId), NULL); stmt->newname = newName; diff --git a/src/include/columnar/columnar.h b/src/include/columnar/columnar.h index 1883be38b..095ce8b45 100644 --- a/src/include/columnar/columnar.h +++ b/src/include/columnar/columnar.h @@ -27,11 +27,7 @@ #include "columnar/columnar_compression.h" #include "columnar/columnar_metadata.h" -#if PG_VERSION_NUM >= PG_VERSION_16 #include "storage/relfilelocator.h" -#else -#include "storage/relfilenode.h" -#endif #define COLUMNAR_AM_NAME "columnar" #define COLUMNAR_MODULE_NAME "citus_columnar" diff --git a/src/include/columnar/columnar_metadata.h b/src/include/columnar/columnar_metadata.h index 81817f733..021c9bea4 100644 --- a/src/include/columnar/columnar_metadata.h +++ b/src/include/columnar/columnar_metadata.h @@ -17,11 +17,7 @@ #include "pg_version_compat.h" #include "pg_version_constants.h" -#if PG_VERSION_NUM >= PG_VERSION_16 #include "storage/relfilelocator.h" -#else -#include "storage/relfilenode.h" -#endif /* diff --git a/src/include/distributed/errormessage.h b/src/include/distributed/errormessage.h index 7a38d513c..3cdb3d056 100644 --- 
a/src/include/distributed/errormessage.h +++ b/src/include/distributed/errormessage.h @@ -38,7 +38,7 @@ typedef struct DeferredErrorMessage */ #define DeferredError(code, message, detail, hint) \ DeferredErrorInternal(code, message, detail, hint, __FILE__, __LINE__, \ - PG_FUNCNAME_MACRO) + __func__) DeferredErrorMessage * DeferredErrorInternal(int code, const char *message, const char *detail, const char *hint, diff --git a/src/include/distributed/relation_utils.h b/src/include/distributed/relation_utils.h index d3a5ab105..f57f33be9 100644 --- a/src/include/distributed/relation_utils.h +++ b/src/include/distributed/relation_utils.h @@ -14,15 +14,11 @@ #include "postgres.h" #include "pg_version_constants.h" -#if PG_VERSION_NUM >= PG_VERSION_16 #include "parser/parse_relation.h" -#endif #include "utils/relcache.h" extern char * RelationGetNamespaceName(Relation relation); -#if PG_VERSION_NUM >= PG_VERSION_16 extern RTEPermissionInfo * GetFilledPermissionInfo(Oid relid, bool inh, AclMode requiredPerms); -#endif #endif /* RELATION_UTILS_H */ diff --git a/src/include/distributed/resource_lock.h b/src/include/distributed/resource_lock.h index 0696ef6e8..0e6116593 100644 --- a/src/include/distributed/resource_lock.h +++ b/src/include/distributed/resource_lock.h @@ -179,9 +179,7 @@ IsNodeWideObjectClass(ObjectClass objectClass) case OCLASS_DATABASE: case OCLASS_TBLSPACE: case OCLASS_PARAMETER_ACL: -#if PG_VERSION_NUM >= PG_VERSION_16 case OCLASS_ROLE_MEMBERSHIP: -#endif { return true; } diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index a30925e4e..8e4757c6b 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -461,178 +461,6 @@ getStxstattarget_compat(HeapTuple tup) #endif -#if PG_VERSION_NUM >= PG_VERSION_16 - -#include "utils/guc_tables.h" - -#define pg_clean_ascii_compat(a, b) pg_clean_ascii(a, b) - -#define RelationPhysicalIdentifier_compat(a) ((a)->rd_locator) -#define RelationTablespace_compat(a) (a.spcOid) -#define RelationPhysicalIdentifierNumber_compat(a) (a.relNumber) -#define RelationPhysicalIdentifierNumberPtr_compat(a) (a->relNumber) -#define RelationPhysicalIdentifierBackend_compat(a) (a->smgr_rlocator.locator) - -#define float_abs(a) fabs(a) - -#define tuplesort_getdatum_compat(a, b, c, d, e, f) tuplesort_getdatum(a, b, c, d, e, f) - -static inline struct config_generic ** -get_guc_variables_compat(int *gucCount) -{ - return get_guc_variables(gucCount); -} - - -#define PG_FUNCNAME_MACRO __func__ - -#define stringToQualifiedNameList_compat(a) stringToQualifiedNameList(a, NULL) -#define typeStringToTypeName_compat(a, b) typeStringToTypeName(a, b) - -#define get_relids_in_jointree_compat(a, b, c) get_relids_in_jointree(a, b, c) - -#define object_ownercheck(a, b, c) object_ownercheck(a, b, c) -#define object_aclcheck(a, b, c, d) object_aclcheck(a, b, c, d) - -#define pgstat_fetch_stat_local_beentry(a) pgstat_get_local_beentry_by_index(a) - -#define have_createdb_privilege() have_createdb_privilege() - -#else - -#include "miscadmin.h" - -#include "catalog/pg_authid.h" -#include "catalog/pg_class_d.h" -#include "catalog/pg_database_d.h" -#include "catalog/pg_namespace.h" -#include "catalog/pg_proc_d.h" -#include "storage/relfilenode.h" -#include "utils/guc.h" -#include "utils/guc_tables.h" -#include "utils/syscache.h" - -#define pg_clean_ascii_compat(a, b) pg_clean_ascii(a) - -#define RelationPhysicalIdentifier_compat(a) ((a)->rd_node) -#define RelationTablespace_compat(a) (a.spcNode) -#define 
RelationPhysicalIdentifierNumber_compat(a) (a.relNode) -#define RelationPhysicalIdentifierNumberPtr_compat(a) (a->relNode) -#define RelationPhysicalIdentifierBackend_compat(a) (a->smgr_rnode.node) -typedef RelFileNode RelFileLocator; -typedef Oid RelFileNumber; -#define RelidByRelfilenumber(a, b) RelidByRelfilenode(a, b) - -#define float_abs(a) Abs(a) - -#define tuplesort_getdatum_compat(a, b, c, d, e, f) tuplesort_getdatum(a, b, d, e, f) - -static inline struct config_generic ** -get_guc_variables_compat(int *gucCount) -{ - *gucCount = GetNumConfigOptions(); - return get_guc_variables(); -} - - -#define stringToQualifiedNameList_compat(a) stringToQualifiedNameList(a) -#define typeStringToTypeName_compat(a, b) typeStringToTypeName(a) - -#define get_relids_in_jointree_compat(a, b, c) get_relids_in_jointree(a, b) - -static inline bool -object_ownercheck(Oid classid, Oid objectid, Oid roleid) -{ - switch (classid) - { - case RelationRelationId: - { - return pg_class_ownercheck(objectid, roleid); - } - - case NamespaceRelationId: - { - return pg_namespace_ownercheck(objectid, roleid); - } - - case ProcedureRelationId: - { - return pg_proc_ownercheck(objectid, roleid); - } - - case DatabaseRelationId: - { - return pg_database_ownercheck(objectid, roleid); - } - - default: - { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Missing classid:%d", - classid))); - } - } -} - - -static inline AclResult -object_aclcheck(Oid classid, Oid objectid, Oid roleid, AclMode mode) -{ - switch (classid) - { - case NamespaceRelationId: - { - return pg_namespace_aclcheck(objectid, roleid, mode); - } - - case ProcedureRelationId: - { - return pg_proc_aclcheck(objectid, roleid, mode); - } - - default: - { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Missing classid:%d", - classid))); - } - } -} - - -static inline bool -have_createdb_privilege(void) -{ - bool result = false; - HeapTuple utup; - - /* Superusers can always do everything */ - if (superuser()) - { - return true; - } - - utup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(GetUserId())); - if (HeapTupleIsValid(utup)) - { - result = ((Form_pg_authid) GETSTRUCT(utup))->rolcreatedb; - ReleaseSysCache(utup); - } - return result; -} - - -typedef bool TU_UpdateIndexes; - -/* - * we define RTEPermissionInfo for PG16 compatibility - * There are some functions that need to include RTEPermissionInfo in their signature - * for PG14/PG15 we pass a NULL argument in these functions - */ -typedef RangeTblEntry RTEPermissionInfo; - -#endif - #define SetListCellPtr(a, b) ((a)->ptr_value = (b)) #define RangeTableEntryFromNSItem(a) ((a)->p_rte) #define fcGetArgValue(fc, n) ((fc)->args[n].value) diff --git a/src/include/pg_version_constants.h b/src/include/pg_version_constants.h index b3f293a2c..b4323c22f 100644 --- a/src/include/pg_version_constants.h +++ b/src/include/pg_version_constants.h @@ -11,7 +11,6 @@ #ifndef PG_VERSION_CONSTANTS #define PG_VERSION_CONSTANTS -#define PG_VERSION_15 150000 #define PG_VERSION_16 160000 #define PG_VERSION_17 170000 #define PG_VERSION_18 180000 diff --git a/src/test/regress/expected/alter_table_set_access_method.out b/src/test/regress/expected/alter_table_set_access_method.out index fe8b8622a..26047c897 100644 --- a/src/test/regress/expected/alter_table_set_access_method.out +++ b/src/test/regress/expected/alter_table_set_access_method.out @@ -776,15 +776,8 @@ RESET client_min_messages; create table events (event_id bigserial, event_time timestamptz default now(), payload text); create index on 
events (event_id); insert into events (payload) select 'hello-'||s from generate_series(1,10) s; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset BEGIN; - \if :server_version_ge_16 SET LOCAL debug_parallel_query = regress; - \else - SET LOCAL force_parallel_mode = regress; - \endif SET LOCAL min_parallel_table_scan_size = 1; SET LOCAL parallel_tuple_cost = 0; SET LOCAL max_parallel_workers = 4; diff --git a/src/test/regress/expected/columnar_fallback_scan.out b/src/test/regress/expected/columnar_fallback_scan.out index b65e4118c..d4f3ee504 100644 --- a/src/test/regress/expected/columnar_fallback_scan.out +++ b/src/test/regress/expected/columnar_fallback_scan.out @@ -21,14 +21,7 @@ select count(*), min(i), max(i), avg(i) from fallback_scan; -- Negative test: try to force a parallel plan with at least two -- workers, but columnar should reject it and use a non-parallel scan. -- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset -\if :server_version_ge_16 set debug_parallel_query = regress; -\else -set force_parallel_mode = regress; -\endif set min_parallel_table_scan_size = 1; set parallel_tuple_cost = 0; set max_parallel_workers = 4; @@ -46,11 +39,7 @@ select count(*), min(i), max(i), avg(i) from fallback_scan; 150000 | 1 | 150000 | 75000.500000000000 (1 row) -\if :server_version_ge_16 set debug_parallel_query = default; -\else -set force_parallel_mode = default; -\endif set min_parallel_table_scan_size to default; set parallel_tuple_cost to default; set max_parallel_workers to default; diff --git a/src/test/regress/expected/columnar_indexes.out b/src/test/regress/expected/columnar_indexes.out index 341dc6b63..7d292dbca 100644 --- a/src/test/regress/expected/columnar_indexes.out +++ b/src/test/regress/expected/columnar_indexes.out @@ -556,9 +556,6 @@ create table events (event_id bigserial, event_time timestamptz default now(), p BEGIN; -- this wouldn't flush any data insert into events (payload) select 'hello-'||s from generate_series(1, 10) s; - SHOW server_version \gset - SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 - \gset -- Since table is large enough, normally postgres would prefer using -- parallel workers when building the index. -- @@ -570,11 +567,7 @@ BEGIN; -- by postgres and throws an error. For this reason, here we don't expect -- following commnad to fail since we prevent using parallel workers for -- columnar tables. 
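Background for the version guard dropped here and in the other columnar tests: PG16 renamed force_parallel_mode to debug_parallel_query, and that rename was the only reason these tests were gated on the server version. If the dual-name selection were ever needed from C, a purely hypothetical helper would reduce to:

#include "postgres.h"

/* Hypothetical: pick the name of the plan-forcing GUC across the PG16
 * rename of force_parallel_mode to debug_parallel_query. */
static const char *
ForceParallelGucName(void)
{
#if PG_VERSION_NUM >= 160000
	return "debug_parallel_query";
#else
	return "force_parallel_mode";
#endif
}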
- \if :server_version_ge_16 SET LOCAL debug_parallel_query = regress; - \else - SET LOCAL force_parallel_mode = regress; - \endif SET LOCAL min_parallel_table_scan_size = 1; SET LOCAL parallel_tuple_cost = 0; SET LOCAL max_parallel_workers = 4; diff --git a/src/test/regress/expected/columnar_partitioning.out b/src/test/regress/expected/columnar_partitioning.out index bd7bbdc25..ca2b35dd8 100644 --- a/src/test/regress/expected/columnar_partitioning.out +++ b/src/test/regress/expected/columnar_partitioning.out @@ -20,15 +20,8 @@ INSERT INTO parent SELECT '2020-03-15', 30, 300, 'three thousand' FROM generate_series(1,100000); INSERT INTO parent SELECT '2020-04-15', 30, 300, 'three thousand' FROM generate_series(1,100000); -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset -- run parallel plans -\if :server_version_ge_16 SET debug_parallel_query = regress; -\else -SET force_parallel_mode = regress; -\endif SET min_parallel_table_scan_size = 1; SET parallel_tuple_cost = 0; SET max_parallel_workers = 4; @@ -104,11 +97,7 @@ SELECT count(*), sum(i), min(i), max(i) FROM parent; (1 row) SET columnar.enable_custom_scan TO DEFAULT; -\if :server_version_ge_16 SET debug_parallel_query TO DEFAULT; -\else -SET force_parallel_mode TO DEFAULT; -\endif SET min_parallel_table_scan_size TO DEFAULT; SET parallel_tuple_cost TO DEFAULT; SET max_parallel_workers TO DEFAULT; diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15.out b/src/test/regress/expected/create_drop_database_propagation_pg15.out index 9a501558a..e1646dac6 100644 --- a/src/test/regress/expected/create_drop_database_propagation_pg15.out +++ b/src/test/regress/expected/create_drop_database_propagation_pg15.out @@ -1,13 +1,3 @@ --- --- PG15 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif -- create/drop database for pg >= 15 set citus.enable_create_database_propagation=on; CREATE DATABASE mydatabase diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15_0.out b/src/test/regress/expected/create_drop_database_propagation_pg15_0.out deleted file mode 100644 index b1ed9cc5b..000000000 --- a/src/test/regress/expected/create_drop_database_propagation_pg15_0.out +++ /dev/null @@ -1,9 +0,0 @@ --- --- PG15 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q diff --git a/src/test/regress/expected/create_drop_database_propagation_pg16.out b/src/test/regress/expected/create_drop_database_propagation_pg16.out index 75cd99e61..0245ac9e6 100644 --- a/src/test/regress/expected/create_drop_database_propagation_pg16.out +++ b/src/test/regress/expected/create_drop_database_propagation_pg16.out @@ -1,13 +1,3 @@ --- --- PG16 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset -\if :server_version_ge_16 -\else -\q -\endif -- create/drop database for pg >= 16 set citus.enable_create_database_propagation=on; -- test icu_rules diff --git a/src/test/regress/expected/create_drop_database_propagation_pg16_0.out b/src/test/regress/expected/create_drop_database_propagation_pg16_0.out deleted file mode 100644 index 730c916ca..000000000 --- a/src/test/regress/expected/create_drop_database_propagation_pg16_0.out +++ /dev/null @@ -1,9 +0,0 @@ --- --- PG16 --- -SHOW server_version \gset -SELECT 
substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset -\if :server_version_ge_16 -\else -\q diff --git a/src/test/regress/expected/merge_unsupported.out b/src/test/regress/expected/merge_unsupported.out index 62f51a679..af465d3a9 100644 --- a/src/test/regress/expected/merge_unsupported.out +++ b/src/test/regress/expected/merge_unsupported.out @@ -2,7 +2,6 @@ SHOW server_version \gset SELECT CASE WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' - WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14' ELSE 'Unsupported version' END AS version_category; version_category @@ -10,12 +9,6 @@ SELECT CASE 17+ (1 row) -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif -- -- MERGE test from PG community (adapted to Citus by converting all tables to Citus local) -- diff --git a/src/test/regress/expected/merge_unsupported_0.out b/src/test/regress/expected/merge_unsupported_0.out index b788c1670..e322a0f1e 100644 --- a/src/test/regress/expected/merge_unsupported_0.out +++ b/src/test/regress/expected/merge_unsupported_0.out @@ -2,7 +2,6 @@ SHOW server_version \gset SELECT CASE WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' - WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14' ELSE 'Unsupported version' END AS version_category; version_category @@ -10,12 +9,6 @@ SELECT CASE 15_16 (1 row) -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif -- -- MERGE test from PG community (adapted to Citus by converting all tables to Citus local) -- diff --git a/src/test/regress/expected/merge_unsupported_1.out b/src/test/regress/expected/merge_unsupported_1.out deleted file mode 100644 index 187c5d630..000000000 --- a/src/test/regress/expected/merge_unsupported_1.out +++ /dev/null @@ -1,17 +0,0 @@ -SHOW server_version \gset -SELECT CASE - WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' - WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' - WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14' - ELSE 'Unsupported version' - END AS version_category; - version_category ---------------------------------------------------------------------- - 14 -(1 row) - -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 6345e15ac..1e2ab9601 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -1287,21 +1287,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the vartype of table from int to bigint -- so that making two tables colocated fails - -- include varnullingrels for PG16+ - SHOW server_version \gset - SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 - \gset -- include varreturningtype for PG18+ SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset \if 
:server_version_ge_18 UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varreturningtype 0 :varnoold 1 :varoattno 1 :location -1}' WHERE logicalrelid = 'test_2'::regclass; - \elif :server_version_ge_16 - UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' - WHERE logicalrelid = 'test_2'::regclass; \else - UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' + UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' WHERE logicalrelid = 'test_2'::regclass; \endif SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); diff --git a/src/test/regress/expected/multi_complex_count_distinct.out b/src/test/regress/expected/multi_complex_count_distinct.out index baa9c829a..85be7fde8 100644 --- a/src/test/regress/expected/multi_complex_count_distinct.out +++ b/src/test/regress/expected/multi_complex_count_distinct.out @@ -1,18 +1,6 @@ -- -- COMPLEX_COUNT_DISTINCT -- --- This test file has an alternative output because of the following in PG16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 --- The alternative output can be deleted when we drop support for PG15 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; - server_version_ge_16 ---------------------------------------------------------------------- - t -(1 row) - SET citus.next_shard_id TO 240000; SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 1; diff --git a/src/test/regress/expected/multi_complex_count_distinct_0.out b/src/test/regress/expected/multi_complex_count_distinct_0.out deleted file mode 100644 index 36af62e96..000000000 --- a/src/test/regress/expected/multi_complex_count_distinct_0.out +++ /dev/null @@ -1,1139 +0,0 @@ --- --- COMPLEX_COUNT_DISTINCT --- --- This test file has an alternative output because of the following in PG16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 --- The alternative output can be deleted when we drop support for PG15 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; - server_version_ge_16 ---------------------------------------------------------------------- - f -(1 row) - -SET citus.next_shard_id TO 240000; -SET citus.shard_count TO 8; -SET citus.shard_replication_factor TO 1; -SET citus.coordinator_aggregation_strategy TO 'disabled'; -CREATE TABLE lineitem_hash ( - l_orderkey bigint not null, - l_partkey integer not null, - l_suppkey integer not null, - l_linenumber integer not null, - l_quantity decimal(15, 2) not null, - l_extendedprice decimal(15, 2) not null, - l_discount decimal(15, 2) not null, - l_tax decimal(15, 2) not null, - l_returnflag char(1) not null, - l_linestatus char(1) not null, - l_shipdate date not null, - l_commitdate date not null, - l_receiptdate date not null, - l_shipinstruct char(25) not null, - l_shipmode char(10) not 
null, - l_comment varchar(44) not null, - PRIMARY KEY(l_orderkey, l_linenumber) ); -SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data' -\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data' -\set client_side_copy_command '\\copy lineitem_hash FROM ' :'lineitem_1_data_file' ' with delimiter '''|''';' -:client_side_copy_command -\set client_side_copy_command '\\copy lineitem_hash FROM ' :'lineitem_2_data_file' ' with delimiter '''|''';' -:client_side_copy_command -ANALYZE lineitem_hash; --- count(distinct) is supported on top level query if there --- is a grouping on the partition key -SELECT - l_orderkey, count(DISTINCT l_partkey) - FROM lineitem_hash - GROUP BY l_orderkey - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - l_orderkey | count ---------------------------------------------------------------------- - 14885 | 7 - 14884 | 7 - 14821 | 7 - 14790 | 7 - 14785 | 7 - 14755 | 7 - 14725 | 7 - 14694 | 7 - 14627 | 7 - 14624 | 7 -(10 rows) - -EXPLAIN (COSTS false, VERBOSE true) -SELECT - l_orderkey, count(DISTINCT l_partkey) - FROM lineitem_hash - GROUP BY l_orderkey - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------- - Limit - Output: remote_scan.l_orderkey, remote_scan.count - -> Sort - Output: remote_scan.l_orderkey, remote_scan.count - Sort Key: remote_scan.count DESC, remote_scan.l_orderkey DESC - -> Custom Scan (Citus Adaptive) - Output: remote_scan.l_orderkey, remote_scan.count - Task Count: 8 - Tasks Shown: One of 8 - -> Task - Query: SELECT l_orderkey, count(DISTINCT l_partkey) AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey ORDER BY (count(DISTINCT l_partkey)) DESC, l_orderkey DESC LIMIT '10'::bigint - Node: host=localhost port=xxxxx dbname=regression - -> Limit - Output: l_orderkey, (count(DISTINCT l_partkey)) - -> Sort - Output: l_orderkey, (count(DISTINCT l_partkey)) - Sort Key: (count(DISTINCT lineitem_hash.l_partkey)) DESC, lineitem_hash.l_orderkey DESC - -> GroupAggregate - Output: l_orderkey, count(DISTINCT l_partkey) - Group Key: lineitem_hash.l_orderkey - -> Index Scan Backward using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -(22 rows) - --- it is also supported if there is no grouping or grouping is on non-partition field -SELECT - count(DISTINCT l_partkey) - FROM lineitem_hash - ORDER BY 1 DESC - LIMIT 10; - count ---------------------------------------------------------------------- - 11661 -(1 row) - -EXPLAIN (COSTS false, VERBOSE true) -SELECT - count(DISTINCT l_partkey) - FROM lineitem_hash - ORDER BY 1 DESC - LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------- - Limit - Output: (count(DISTINCT remote_scan.count)) - -> Sort - Output: (count(DISTINCT remote_scan.count)) - Sort Key: (count(DISTINCT remote_scan.count)) DESC - -> Aggregate - Output: count(DISTINCT remote_scan.count) - -> Custom Scan (Citus Adaptive) - Output: remote_scan.count - Task Count: 8 - Tasks Shown: One of 8 - -> Task - Query: SELECT l_partkey AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY 
l_partkey - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: l_partkey - Group Key: lineitem_hash.l_partkey - -> Seq Scan on public.lineitem_hash_240000 lineitem_hash - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -(19 rows) - -SELECT - l_shipmode, count(DISTINCT l_partkey) - FROM lineitem_hash - GROUP BY l_shipmode - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - l_shipmode | count ---------------------------------------------------------------------- - TRUCK | 1757 - MAIL | 1730 - AIR | 1702 - FOB | 1700 - RAIL | 1696 - SHIP | 1684 - REG AIR | 1676 -(7 rows) - -EXPLAIN (COSTS false, VERBOSE true) -SELECT - l_shipmode, count(DISTINCT l_partkey) - FROM lineitem_hash - GROUP BY l_shipmode - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------- - Limit - Output: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count)) - -> Sort - Output: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count)) - Sort Key: (count(DISTINCT remote_scan.count)) DESC, remote_scan.l_shipmode DESC - -> GroupAggregate - Output: remote_scan.l_shipmode, count(DISTINCT remote_scan.count) - Group Key: remote_scan.l_shipmode - -> Sort - Output: remote_scan.l_shipmode, remote_scan.count - Sort Key: remote_scan.l_shipmode DESC - -> Custom Scan (Citus Adaptive) - Output: remote_scan.l_shipmode, remote_scan.count - Task Count: 8 - Tasks Shown: One of 8 - -> Task - Query: SELECT l_shipmode, l_partkey AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_shipmode, l_partkey - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: l_shipmode, l_partkey - Group Key: lineitem_hash.l_shipmode, lineitem_hash.l_partkey - -> Seq Scan on public.lineitem_hash_240000 lineitem_hash - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -(23 rows) - --- mixed mode count distinct, grouped by partition column -SELECT - l_orderkey, count(distinct l_partkey), count(distinct l_shipmode) - FROM lineitem_hash - GROUP BY l_orderkey - ORDER BY 3 DESC, 2 DESC, 1 - LIMIT 10; - l_orderkey | count | count ---------------------------------------------------------------------- - 226 | 7 | 7 - 1316 | 7 | 7 - 1477 | 7 | 7 - 3555 | 7 | 7 - 12258 | 7 | 7 - 12835 | 7 | 7 - 768 | 7 | 6 - 1121 | 7 | 6 - 1153 | 7 | 6 - 1281 | 7 | 6 -(10 rows) - -EXPLAIN (COSTS false, VERBOSE true) -SELECT - l_orderkey, count(distinct l_partkey), count(distinct l_shipmode) - FROM lineitem_hash - GROUP BY l_orderkey - ORDER BY 3 DESC, 2 DESC, 1 - LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------- - Limit - Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 - -> Sort - Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 - Sort Key: remote_scan.count_1 DESC, remote_scan.count DESC, remote_scan.l_orderkey - -> Custom Scan (Citus Adaptive) - Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 - Task Count: 8 - Tasks Shown: One of 8 - -> Task - Query: SELECT l_orderkey, count(DISTINCT l_partkey) AS count, count(DISTINCT l_shipmode) AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey ORDER BY 
(count(DISTINCT l_shipmode)) DESC, (count(DISTINCT l_partkey)) DESC, l_orderkey LIMIT '10'::bigint - Node: host=localhost port=xxxxx dbname=regression - -> Limit - Output: l_orderkey, (count(DISTINCT l_partkey)), (count(DISTINCT l_shipmode)) - -> Sort - Output: l_orderkey, (count(DISTINCT l_partkey)), (count(DISTINCT l_shipmode)) - Sort Key: (count(DISTINCT lineitem_hash.l_shipmode)) DESC, (count(DISTINCT lineitem_hash.l_partkey)) DESC, lineitem_hash.l_orderkey - -> GroupAggregate - Output: l_orderkey, count(DISTINCT l_partkey), count(DISTINCT l_shipmode) - Group Key: lineitem_hash.l_orderkey - -> Index Scan using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -(22 rows) - --- partition/non-partition column count distinct no grouping -SELECT - count(distinct l_orderkey), count(distinct l_partkey), count(distinct l_shipmode) - FROM lineitem_hash; - count | count | count ---------------------------------------------------------------------- - 2985 | 11661 | 7 -(1 row) - -EXPLAIN (COSTS false, VERBOSE true) -SELECT - count(distinct l_orderkey), count(distinct l_partkey), count(distinct l_shipmode) - FROM lineitem_hash; - QUERY PLAN ---------------------------------------------------------------------- - Aggregate - Output: count(DISTINCT remote_scan.count), count(DISTINCT remote_scan.count_1), count(DISTINCT remote_scan.count_2) - -> Custom Scan (Citus Adaptive) - Output: remote_scan.count, remote_scan.count_1, remote_scan.count_2 - Task Count: 8 - Tasks Shown: One of 8 - -> Task - Query: SELECT l_orderkey AS count, l_partkey AS count, l_shipmode AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey, l_partkey, l_shipmode - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: l_orderkey, l_partkey, l_shipmode - Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode - -> Seq Scan on public.lineitem_hash_240000 lineitem_hash - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -(14 rows) - --- distinct/non-distinct on partition and non-partition columns -SELECT - count(distinct l_orderkey), count(l_orderkey), - count(distinct l_partkey), count(l_partkey), - count(distinct l_shipmode), count(l_shipmode) - FROM lineitem_hash; - count | count | count | count | count | count ---------------------------------------------------------------------- - 2985 | 12000 | 11661 | 12000 | 7 | 12000 -(1 row) - --- mixed mode count distinct, grouped by non-partition column -SELECT - l_shipmode, count(distinct l_partkey), count(distinct l_orderkey) - FROM lineitem_hash - GROUP BY l_shipmode - ORDER BY 1, 2 DESC, 3 DESC; - l_shipmode | count | count ---------------------------------------------------------------------- - AIR | 1702 | 1327 - FOB | 1700 | 1276 - MAIL | 1730 | 1299 - RAIL | 1696 | 1265 - REG AIR | 1676 | 1275 - SHIP | 1684 | 1289 - TRUCK | 1757 | 1333 -(7 rows) - --- mixed mode count distinct, grouped by non-partition column --- having on partition column -SELECT - l_shipmode, count(distinct l_partkey), count(distinct l_orderkey) - FROM lineitem_hash - GROUP BY l_shipmode - HAVING count(distinct l_orderkey) > 1300 - ORDER 
BY 1, 2 DESC; - l_shipmode | count | count ---------------------------------------------------------------------- - AIR | 1702 | 1327 - TRUCK | 1757 | 1333 -(2 rows) - --- same but having clause is not on target list -SELECT - l_shipmode, count(distinct l_partkey) - FROM lineitem_hash - GROUP BY l_shipmode - HAVING count(distinct l_orderkey) > 1300 - ORDER BY 1, 2 DESC; - l_shipmode | count ---------------------------------------------------------------------- - AIR | 1702 - TRUCK | 1757 -(2 rows) - --- mixed mode count distinct, grouped by non-partition column --- having on non-partition column -SELECT - l_shipmode, count(distinct l_partkey), count(distinct l_suppkey) - FROM lineitem_hash - GROUP BY l_shipmode - HAVING count(distinct l_suppkey) > 1550 - ORDER BY 1, 2 DESC; - l_shipmode | count | count ---------------------------------------------------------------------- - AIR | 1702 | 1564 - FOB | 1700 | 1571 - MAIL | 1730 | 1573 - RAIL | 1696 | 1581 - REG AIR | 1676 | 1557 - SHIP | 1684 | 1554 - TRUCK | 1757 | 1602 -(7 rows) - --- same but having clause is not on target list -SELECT - l_shipmode, count(distinct l_partkey) - FROM lineitem_hash - GROUP BY l_shipmode - HAVING count(distinct l_suppkey) > 1550 - ORDER BY 1, 2 DESC; - l_shipmode | count ---------------------------------------------------------------------- - AIR | 1702 - FOB | 1700 - MAIL | 1730 - RAIL | 1696 - REG AIR | 1676 - SHIP | 1684 - TRUCK | 1757 -(7 rows) - --- count distinct is supported on single table subqueries -SELECT * - FROM ( - SELECT - l_orderkey, count(DISTINCT l_partkey) - FROM lineitem_hash - GROUP BY l_orderkey) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - l_orderkey | count ---------------------------------------------------------------------- - 14885 | 7 - 14884 | 7 - 14821 | 7 - 14790 | 7 - 14785 | 7 - 14755 | 7 - 14725 | 7 - 14694 | 7 - 14627 | 7 - 14624 | 7 -(10 rows) - -SELECT * - FROM ( - SELECT - l_partkey, count(DISTINCT l_orderkey) - FROM lineitem_hash - GROUP BY l_partkey) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - l_partkey | count ---------------------------------------------------------------------- - 199146 | 3 - 188804 | 3 - 177771 | 3 - 160895 | 3 - 149926 | 3 - 136884 | 3 - 87761 | 3 - 15283 | 3 - 6983 | 3 - 1927 | 3 -(10 rows) - -EXPLAIN (COSTS false, VERBOSE true) -SELECT * - FROM ( - SELECT - l_partkey, count(DISTINCT l_orderkey) - FROM lineitem_hash - GROUP BY l_partkey) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (Citus Adaptive) - Output: remote_scan.l_partkey, remote_scan.count - -> Distributed Subplan XXX_1 - -> HashAggregate - Output: remote_scan.l_partkey, COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint) - Group Key: remote_scan.l_partkey - -> Custom Scan (Citus Adaptive) - Output: remote_scan.l_partkey, remote_scan.count - Task Count: 8 - Tasks Shown: One of 8 - -> Task - Query: SELECT l_partkey, count(DISTINCT l_orderkey) AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_partkey - Node: host=localhost port=xxxxx dbname=regression - -> GroupAggregate - Output: l_partkey, count(DISTINCT l_orderkey) - Group Key: lineitem_hash.l_partkey - -> Sort - Output: l_partkey, l_orderkey - Sort Key: lineitem_hash.l_partkey - -> Seq Scan on public.lineitem_hash_240000 lineitem_hash - Output: l_partkey, l_orderkey - Task Count: 1 - Tasks Shown: All - -> Task - Query: SELECT l_partkey, count FROM (SELECT intermediate_result.l_partkey, 
intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_partkey integer, count bigint)) sub ORDER BY count DESC, l_partkey DESC LIMIT 10 - Node: host=localhost port=xxxxx dbname=regression - -> Limit - Output: intermediate_result.l_partkey, intermediate_result.count - -> Sort - Output: intermediate_result.l_partkey, intermediate_result.count - Sort Key: intermediate_result.count DESC, intermediate_result.l_partkey DESC - -> Function Scan on pg_catalog.read_intermediate_result intermediate_result - Output: intermediate_result.l_partkey, intermediate_result.count - Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) -(34 rows) - --- count distinct with filters -SELECT - l_orderkey, - count(DISTINCT l_suppkey) FILTER (WHERE l_shipmode = 'AIR'), - count(DISTINCT l_suppkey) - FROM lineitem_hash - GROUP BY l_orderkey - ORDER BY 2 DESC, 3 DESC, 1 - LIMIT 10; - l_orderkey | count | count ---------------------------------------------------------------------- - 4964 | 4 | 7 - 12005 | 4 | 7 - 5409 | 4 | 6 - 164 | 3 | 7 - 322 | 3 | 7 - 871 | 3 | 7 - 1156 | 3 | 7 - 1574 | 3 | 7 - 2054 | 3 | 7 - 2309 | 3 | 7 -(10 rows) - -EXPLAIN (COSTS false, VERBOSE true) -SELECT - l_orderkey, - count(DISTINCT l_suppkey) FILTER (WHERE l_shipmode = 'AIR'), - count(DISTINCT l_suppkey) - FROM lineitem_hash - GROUP BY l_orderkey - ORDER BY 2 DESC, 3 DESC, 1 - LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------- - Limit - Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 - -> Sort - Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 - Sort Key: remote_scan.count DESC, remote_scan.count_1 DESC, remote_scan.l_orderkey - -> Custom Scan (Citus Adaptive) - Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1 - Task Count: 8 - Tasks Shown: One of 8 - -> Task - Query: SELECT l_orderkey, count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar)) AS count, count(DISTINCT l_suppkey) AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey ORDER BY (count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar))) DESC, (count(DISTINCT l_suppkey)) DESC, l_orderkey LIMIT '10'::bigint - Node: host=localhost port=xxxxx dbname=regression - -> Limit - Output: l_orderkey, (count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar))), (count(DISTINCT l_suppkey)) - -> Sort - Output: l_orderkey, (count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar))), (count(DISTINCT l_suppkey)) - Sort Key: (count(DISTINCT lineitem_hash.l_suppkey) FILTER (WHERE (lineitem_hash.l_shipmode = 'AIR'::bpchar))) DESC, (count(DISTINCT lineitem_hash.l_suppkey)) DESC, lineitem_hash.l_orderkey - -> GroupAggregate - Output: l_orderkey, count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar)), count(DISTINCT l_suppkey) - Group Key: lineitem_hash.l_orderkey - -> Index Scan using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -(22 rows) - --- group by on non-partition column -SELECT - l_suppkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR') - FROM lineitem_hash - GROUP BY l_suppkey - ORDER BY 2 DESC, 1 DESC - 
LIMIT 10; - l_suppkey | count ---------------------------------------------------------------------- - 7680 | 4 - 7703 | 3 - 7542 | 3 - 7072 | 3 - 6335 | 3 - 5873 | 3 - 1318 | 3 - 1042 | 3 - 160 | 3 - 9872 | 2 -(10 rows) - --- explaining the same query fails -EXPLAIN (COSTS false, VERBOSE true) -SELECT - l_suppkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR') - FROM lineitem_hash - GROUP BY l_suppkey - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------- - Limit - Output: remote_scan.l_suppkey, (count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar))) - -> Sort - Output: remote_scan.l_suppkey, (count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar))) - Sort Key: (count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar))) DESC, remote_scan.l_suppkey DESC - -> GroupAggregate - Output: remote_scan.l_suppkey, count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar)) - Group Key: remote_scan.l_suppkey - -> Sort - Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1 - Sort Key: remote_scan.l_suppkey DESC - -> Custom Scan (Citus Adaptive) - Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1 - Task Count: 8 - Tasks Shown: One of 8 - -> Task - Query: SELECT l_suppkey, l_partkey AS count, l_shipmode AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_suppkey, l_partkey, l_shipmode - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: l_suppkey, l_partkey, l_shipmode - Group Key: lineitem_hash.l_suppkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode - -> Seq Scan on public.lineitem_hash_240000 lineitem_hash - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -(23 rows) - --- without group by, on partition column -SELECT - count(DISTINCT l_orderkey) FILTER (WHERE l_shipmode = 'AIR') - FROM lineitem_hash; - count ---------------------------------------------------------------------- - 1327 -(1 row) - --- without group by, on non-partition column -SELECT - count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR') - FROM lineitem_hash; - count ---------------------------------------------------------------------- - 1702 -(1 row) - -SELECT - count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR'), - count(DISTINCT l_partkey), - count(DISTINCT l_shipdate) - FROM lineitem_hash; - count | count | count ---------------------------------------------------------------------- - 1702 | 11661 | 2470 -(1 row) - --- filter column already exists in target list -SELECT * - FROM ( - SELECT - l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_orderkey > 100) - FROM lineitem_hash - GROUP BY l_orderkey) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - l_orderkey | count ---------------------------------------------------------------------- - 14885 | 7 - 14884 | 7 - 14821 | 7 - 14790 | 7 - 14785 | 7 - 14755 | 7 - 14725 | 7 - 14694 | 7 - 14627 | 7 - 14624 | 7 -(10 rows) - --- filter column does not exist in target list -SELECT * - FROM ( - SELECT - l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR') - FROM lineitem_hash - GROUP BY l_orderkey) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - l_orderkey | count 
---------------------------------------------------------------------- - 12005 | 4 - 5409 | 4 - 4964 | 4 - 14848 | 3 - 14496 | 3 - 13473 | 3 - 13122 | 3 - 12929 | 3 - 12645 | 3 - 12417 | 3 -(10 rows) - --- case expr in count distinct is supported. --- count orders partkeys if l_shipmode is air -SELECT * - FROM ( - SELECT - l_orderkey, count(DISTINCT CASE WHEN l_shipmode = 'AIR' THEN l_partkey ELSE NULL END) as count - FROM lineitem_hash - GROUP BY l_orderkey) sub - WHERE count > 0 - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - l_orderkey | count ---------------------------------------------------------------------- - 12005 | 4 - 5409 | 4 - 4964 | 4 - 14848 | 3 - 14496 | 3 - 13473 | 3 - 13122 | 3 - 12929 | 3 - 12645 | 3 - 12417 | 3 -(10 rows) - --- text like operator is also supported -SELECT * - FROM ( - SELECT - l_orderkey, count(DISTINCT CASE WHEN l_shipmode like '%A%' THEN l_partkey ELSE NULL END) as count - FROM lineitem_hash - GROUP BY l_orderkey) sub - WHERE count > 0 - ORDER BY 2 DESC, 1 DESC - LIMIT 10; - l_orderkey | count ---------------------------------------------------------------------- - 14275 | 7 - 14181 | 7 - 13605 | 7 - 12707 | 7 - 12384 | 7 - 11746 | 7 - 10727 | 7 - 10467 | 7 - 5636 | 7 - 4614 | 7 -(10 rows) - --- count distinct is rejected if it does not reference any columns -SELECT * - FROM ( - SELECT - l_linenumber, count(DISTINCT 1) - FROM lineitem_hash - GROUP BY l_linenumber) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; -ERROR: cannot compute aggregate (distinct) -DETAIL: aggregate (distinct) with no columns is unsupported -HINT: You can load the hll extension from contrib packages and enable distinct approximations. --- count distinct is rejected if it does not reference any columns -SELECT * - FROM ( - SELECT - l_linenumber, count(DISTINCT (random() * 5)::int) - FROM lineitem_hash - GROUP BY l_linenumber) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; -ERROR: cannot compute aggregate (distinct) -DETAIL: aggregate (distinct) with no columns is unsupported -HINT: You can load the hll extension from contrib packages and enable distinct approximations. 
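Both rejections above enforce the same planner rule: when a query cannot be pushed down wholesale, Citus evaluates count(DISTINCT ...) by having each worker return the grouped distinct column values and counting them on the coordinator (visible in the EXPLAIN outputs above, where the worker query selects the raw column and the coordinator does count(DISTINCT remote_scan.count)), so the DISTINCT argument must reference at least one column. A minimal sketch of that boundary, assuming a hypothetical hash-distributed table dist(key int, val int) distributed on key (the table and column names are illustrative, not part of this test suite):
-- supported: grouped on the distribution column, so the whole
-- aggregate runs shard-locally
SELECT key, count(DISTINCT val) FROM dist GROUP BY key;
-- supported: not grouped on the distribution column, but the DISTINCT
-- argument names a column, so workers ship per-group values and the
-- coordinator counts the distinct ones
SELECT val, count(DISTINCT key) FROM dist GROUP BY val;
-- rejected with the error shown above: no column reference inside DISTINCT
SELECT val, count(DISTINCT 1) FROM dist GROUP BY val;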
--- even non-const function calls are supported within count distinct -SELECT * - FROM ( - SELECT - l_orderkey, count(DISTINCT (random() * 5)::int = l_linenumber) - FROM lineitem_hash - GROUP BY l_orderkey) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 0; - l_orderkey | count ---------------------------------------------------------------------- -(0 rows) - --- multiple nested subquery -SELECT - total, - avg(avg_count) as total_avg_count - FROM ( - SELECT - number_sum, - count(DISTINCT l_suppkey) as total, - avg(total_count) avg_count - FROM ( - SELECT - l_suppkey, - sum(l_linenumber) as number_sum, - count(DISTINCT l_shipmode) as total_count - FROM - lineitem_hash - WHERE - l_partkey > 100 and - l_quantity > 2 and - l_orderkey < 10000 - GROUP BY - l_suppkey) as distributed_table - WHERE - number_sum >= 10 - GROUP BY - number_sum) as distributed_table_2 - GROUP BY - total - ORDER BY - total_avg_count DESC; - total | total_avg_count ---------------------------------------------------------------------- - 1 | 3.6000000000000000 - 6 | 2.8333333333333333 - 10 | 2.6000000000000000 - 27 | 2.5555555555555556 - 32 | 2.4687500000000000 - 77 | 2.1948051948051948 - 57 | 2.1754385964912281 -(7 rows) - --- multiple cases query -SELECT * - FROM ( - SELECT - count(DISTINCT - CASE - WHEN l_shipmode = 'TRUCK' THEN l_partkey - WHEN l_shipmode = 'AIR' THEN l_quantity - WHEN l_shipmode = 'SHIP' THEN l_discount - ELSE l_suppkey - END) as count, - l_shipdate - FROM - lineitem_hash - GROUP BY - l_shipdate) sub - WHERE - count > 0 - ORDER BY - 1 DESC, 2 DESC - LIMIT 10; - count | l_shipdate ---------------------------------------------------------------------- - 14 | 07-30-1997 - 13 | 05-26-1998 - 13 | 08-08-1997 - 13 | 11-17-1995 - 13 | 01-09-1993 - 12 | 01-15-1998 - 12 | 10-15-1997 - 12 | 09-07-1997 - 12 | 06-02-1997 - 12 | 03-14-1997 -(10 rows) - --- count DISTINCT expression -SELECT * - FROM ( - SELECT - l_quantity, count(DISTINCT ((l_orderkey / 1000) * 1000 )) as count - FROM - lineitem_hash - GROUP BY - l_quantity) sub - WHERE - count > 0 - ORDER BY - 2 DESC, 1 DESC - LIMIT 10; - l_quantity | count ---------------------------------------------------------------------- - 48.00 | 13 - 47.00 | 13 - 37.00 | 13 - 33.00 | 13 - 26.00 | 13 - 25.00 | 13 - 23.00 | 13 - 21.00 | 13 - 15.00 | 13 - 12.00 | 13 -(10 rows) - --- count DISTINCT is part of an expression which includes another aggregate -SELECT * - FROM ( - SELECT - sum(((l_partkey * l_tax) / 100)) / - count(DISTINCT - CASE - WHEN l_shipmode = 'TRUCK' THEN l_partkey - ELSE l_suppkey - END) as avg, - l_shipmode - FROM - lineitem_hash - GROUP BY - l_shipmode) sub - ORDER BY - 1 DESC, 2 DESC - LIMIT 10; - avg | l_shipmode ---------------------------------------------------------------------- - 44.82904609027336300064 | MAIL - 44.80704536679536679537 | SHIP - 44.68891732736572890026 | AIR - 44.34106724470134874759 | REG AIR - 43.12739987269255251432 | FOB - 43.07299253636938646426 | RAIL - 40.50298377916903813318 | TRUCK -(7 rows) - --- count DISTINCT CASE WHEN expression -SELECT * - FROM ( - SELECT - count(DISTINCT - CASE - WHEN l_shipmode = 'TRUCK' THEN l_linenumber - WHEN l_shipmode = 'AIR' THEN l_linenumber + 10 - ELSE 2 - END) as avg - FROM - lineitem_hash - GROUP BY l_shipdate) sub - ORDER BY 1 DESC - LIMIT 10; - avg ---------------------------------------------------------------------- - 7 - 6 - 6 - 6 - 6 - 6 - 6 - 6 - 5 - 5 -(10 rows) - --- COUNT DISTINCT (c1, c2) -SELECT * - FROM - (SELECT - l_shipmode, - count(DISTINCT (l_shipdate, l_tax)) - FROM - lineitem_hash 
- GROUP BY - l_shipmode) t - ORDER BY - 2 DESC,1 DESC - LIMIT 10; - l_shipmode | count ---------------------------------------------------------------------- - TRUCK | 1689 - MAIL | 1683 - FOB | 1655 - AIR | 1650 - SHIP | 1644 - RAIL | 1636 - REG AIR | 1607 -(7 rows) - --- distinct on non-var (type cast/field select) columns are also --- supported if grouped on distribution column --- random is added to prevent flattening by postgresql -SELECT - l_orderkey, count(a::int), count(distinct a::int) - FROM ( - SELECT l_orderkey, l_orderkey * 1.5 a, random() b - FROM lineitem_hash) sub - GROUP BY 1 - ORDER BY 1 DESC - LIMIT 5; - l_orderkey | count | count ---------------------------------------------------------------------- - 14947 | 2 | 1 - 14946 | 2 | 1 - 14945 | 6 | 1 - 14944 | 2 | 1 - 14919 | 1 | 1 -(5 rows) - -SELECT user_id, - count(sub.a::int), - count(DISTINCT sub.a::int), - count(DISTINCT (sub).a) -FROM - (SELECT user_id, - unnest(ARRAY[user_id * 1.5])a, - random() b - FROM users_table - ) sub -GROUP BY 1 -ORDER BY 1 DESC -LIMIT 5; - user_id | count | count | count ---------------------------------------------------------------------- - 6 | 11 | 1 | 1 - 5 | 27 | 1 | 1 - 4 | 24 | 1 | 1 - 3 | 18 | 1 | 1 - 2 | 19 | 1 | 1 -(5 rows) - -CREATE TYPE test_item AS -( - id INTEGER, - duration INTEGER -); -CREATE TABLE test_count_distinct_array (key int, value int , value_arr test_item[]); -SELECT create_distributed_table('test_count_distinct_array', 'key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO test_count_distinct_array SELECT i, i, ARRAY[(i,i)::test_item] FROM generate_Series(0, 1000) i; -SELECT - key, - count(DISTINCT value), - count(DISTINCT (item)."id"), - count(DISTINCT (item)."id" * 3) -FROM - ( - SELECT key, unnest(value_arr) as item, value FROM test_count_distinct_array - ) as sub -GROUP BY 1 -ORDER BY 1 DESC -LIMIT 5; - key | count | count | count ---------------------------------------------------------------------- - 1000 | 1 | 1 | 1 - 999 | 1 | 1 | 1 - 998 | 1 | 1 | 1 - 997 | 1 | 1 | 1 - 996 | 1 | 1 | 1 -(5 rows) - -DROP TABLE test_count_distinct_array; -DROP TYPE test_item; --- other distinct aggregate are not supported -SELECT * - FROM ( - SELECT - l_linenumber, sum(DISTINCT l_partkey) - FROM lineitem_hash - GROUP BY l_linenumber) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; -ERROR: cannot compute aggregate (distinct) -DETAIL: table partitioning is unsuitable for aggregate (distinct) -SELECT * - FROM ( - SELECT - l_linenumber, avg(DISTINCT l_partkey) - FROM lineitem_hash - GROUP BY l_linenumber) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; -ERROR: cannot compute aggregate (distinct) -DETAIL: table partitioning is unsuitable for aggregate (distinct) --- whole row references, oid, and ctid are not supported in count distinct --- test table does not have oid or ctid enabled, so tests for them are skipped -SELECT * - FROM ( - SELECT - l_linenumber, count(DISTINCT lineitem_hash) - FROM lineitem_hash - GROUP BY l_linenumber) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; -ERROR: cannot compute count (distinct) -DETAIL: Non-column references are not supported yet -SELECT * - FROM ( - SELECT - l_linenumber, count(DISTINCT lineitem_hash.*) - FROM lineitem_hash - GROUP BY l_linenumber) sub - ORDER BY 2 DESC, 1 DESC - LIMIT 10; -ERROR: cannot compute count (distinct) -DETAIL: Non-column references are not supported yet --- count distinct pushdown is enabled -SELECT * - FROM ( - SELECT - l_shipdate, - count(DISTINCT - 
CASE - WHEN l_shipmode = 'TRUCK' THEN l_partkey - ELSE NULL - END) as distinct_part, - extract(year from l_shipdate) as year - FROM - lineitem_hash - GROUP BY l_shipdate, year) sub - WHERE year = 1995 - ORDER BY 2 DESC, 1 - LIMIT 10; - l_shipdate | distinct_part | year ---------------------------------------------------------------------- - 11-29-1995 | 5 | 1995 - 03-24-1995 | 4 | 1995 - 09-18-1995 | 4 | 1995 - 01-17-1995 | 3 | 1995 - 04-02-1995 | 3 | 1995 - 05-23-1995 | 3 | 1995 - 08-11-1995 | 3 | 1995 - 09-27-1995 | 3 | 1995 - 10-27-1995 | 3 | 1995 - 10-30-1995 | 3 | 1995 -(10 rows) - --- count distinct pushdown is enabled -SELECT * - FROM ( - SELECT - l_shipdate, - count(DISTINCT - CASE - WHEN l_shipmode = 'TRUCK' THEN l_partkey - ELSE NULL - END) as distinct_part, - extract(year from l_shipdate) as year - FROM - lineitem_hash - GROUP BY l_shipdate, year) sub - WHERE year = 1995 - ORDER BY 2 DESC, 1 - LIMIT 10; - l_shipdate | distinct_part | year ---------------------------------------------------------------------- - 11-29-1995 | 5 | 1995 - 03-24-1995 | 4 | 1995 - 09-18-1995 | 4 | 1995 - 01-17-1995 | 3 | 1995 - 04-02-1995 | 3 | 1995 - 05-23-1995 | 3 | 1995 - 08-11-1995 | 3 | 1995 - 09-27-1995 | 3 | 1995 - 10-27-1995 | 3 | 1995 - 10-30-1995 | 3 | 1995 -(10 rows) - -SELECT * - FROM ( - SELECT - l_shipdate, - count(DISTINCT - CASE - WHEN l_shipmode = 'TRUCK' THEN l_partkey - ELSE NULL - END) as distinct_part, - extract(year from l_shipdate) as year - FROM - lineitem_hash - GROUP BY l_shipdate) sub - WHERE year = 1995 - ORDER BY 2 DESC, 1 - LIMIT 10; - l_shipdate | distinct_part | year ---------------------------------------------------------------------- - 11-29-1995 | 5 | 1995 - 03-24-1995 | 4 | 1995 - 09-18-1995 | 4 | 1995 - 01-17-1995 | 3 | 1995 - 04-02-1995 | 3 | 1995 - 05-23-1995 | 3 | 1995 - 08-11-1995 | 3 | 1995 - 09-27-1995 | 3 | 1995 - 10-27-1995 | 3 | 1995 - 10-30-1995 | 3 | 1995 -(10 rows) - -DROP TABLE lineitem_hash; diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index ca97fc0c1..b7a911564 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -1,22 +1,11 @@ -- -- MULTI_EXPLAIN -- --- This test file has an alternative output because of the following in PG16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 --- The alternative output can be deleted when we drop support for PG15 --- -- This test file has an alternative output because of the following in PG18: -- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a -- The alternative output can be deleted when we drop support for PG17 -- SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; - server_version_ge_16 ---------------------------------------------------------------------- - t -(1 row) - SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18; server_version_ge_18 --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out index 667f932dc..68245bfc3 100644 --- a/src/test/regress/expected/multi_explain_0.out +++ b/src/test/regress/expected/multi_explain_0.out @@ -1,22 +1,11 @@ -- -- MULTI_EXPLAIN -- --- This test file has an alternative output because of the 
following in PG16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 --- The alternative output can be deleted when we drop support for PG15 --- -- This test file has an alternative output because of the following in PG18: -- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a -- The alternative output can be deleted when we drop support for PG17 -- SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; - server_version_ge_16 ---------------------------------------------------------------------- - t -(1 row) - SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18; server_version_ge_18 --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_explain_1.out b/src/test/regress/expected/multi_explain_1.out deleted file mode 100644 index 13434c256..000000000 --- a/src/test/regress/expected/multi_explain_1.out +++ /dev/null @@ -1,3363 +0,0 @@ --- --- MULTI_EXPLAIN --- --- This test file has an alternative output because of the following in PG16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 --- The alternative output can be deleted when we drop support for PG15 --- --- This test file has an alternative output because of the following in PG18: --- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a --- The alternative output can be deleted when we drop support for PG17 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; - server_version_ge_16 ---------------------------------------------------------------------- - f -(1 row) - -SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18; - server_version_ge_18 ---------------------------------------------------------------------- - f -(1 row) - -SET citus.next_shard_id TO 570000; -\a\t -SET citus.explain_distributed_queries TO on; -SET citus.enable_repartition_joins to ON; --- Ensure tuple data in explain analyze output is the same on all PG versions -SET citus.enable_binary_protocol = TRUE; --- Function that parses explain output as JSON -CREATE OR REPLACE FUNCTION explain_json(query text) -RETURNS jsonb -AS $BODY$ -DECLARE - result jsonb; -BEGIN - EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; - RETURN result; -END; -$BODY$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION explain_analyze_json(query text) -RETURNS jsonb -AS $BODY$ -DECLARE - result jsonb; -BEGIN - EXECUTE format('EXPLAIN (ANALYZE TRUE, FORMAT JSON) %s', query) INTO result; - RETURN result; -END; -$BODY$ LANGUAGE plpgsql; --- Function that parses explain output as XML -CREATE OR REPLACE FUNCTION explain_xml(query text) -RETURNS xml -AS $BODY$ -DECLARE - result xml; -BEGIN - EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; - RETURN result; -END; -$BODY$ LANGUAGE plpgsql; --- Function that parses explain output as XML -CREATE OR REPLACE FUNCTION explain_analyze_xml(query text) -RETURNS xml -AS $BODY$ -DECLARE - result xml; -BEGIN - EXECUTE format('EXPLAIN (ANALYZE true, FORMAT XML) %s', query) INTO result; - RETURN result; -END; -$BODY$ LANGUAGE plpgsql; --- VACUMM related tables to ensure test outputs are stable -VACUUM ANALYZE 
lineitem; -VACUUM ANALYZE orders; --- Test Text format -EXPLAIN (COSTS FALSE, FORMAT TEXT) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -Sort - Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity - -> HashAggregate - Group Key: remote_scan.l_quantity - -> Custom Scan (Citus Adaptive) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: l_quantity - -> Seq Scan on lineitem_360000 lineitem --- Test disable hash aggregate -SET enable_hashagg TO off; -EXPLAIN (COSTS FALSE, FORMAT TEXT) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -Sort - Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity - -> GroupAggregate - Group Key: remote_scan.l_quantity - -> Sort - Sort Key: remote_scan.l_quantity - -> Custom Scan (Citus Adaptive) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: l_quantity - -> Seq Scan on lineitem_360000 lineitem -SET enable_hashagg TO on;
--- Test JSON format -EXPLAIN (COSTS FALSE, FORMAT JSON) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -[ - { - "Plan": { - "Node Type": "Sort", - "Parallel Aware": false, - "Async Capable": false, - "Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"], - "Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Hashed", - "Partial Mode": "Simple", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Async Capable": false, - "Group Key": ["remote_scan.l_quantity"], - "Plans": [ - { - "Node Type": "Custom Scan", - "Parent Relationship": "Outer", - "Custom Plan Provider": "Citus Adaptive", - "Parallel Aware": false, - "Async Capable": false, - "Distributed Query": { - "Job": { - "Task Count": 2, - "Tasks Shown": "One of 2", - "Tasks": [ - { - "Node": "host=localhost port=xxxxx dbname=regression", - "Remote Plan": [ - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Hashed", - "Partial Mode": "Simple", - "Parallel Aware": false, - "Async Capable": false, - "Group Key": ["l_quantity"], - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Async Capable": false, - "Relation Name": "lineitem_360000", - "Alias": "lineitem" - } - ] - } - } - ] - - ] - } - ] - } - } - } - ] - } - ] - } - } -]
--- Validate JSON format -SELECT true AS valid FROM explain_json($$ - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); -t -SELECT true AS valid FROM explain_analyze_json($$ - WITH a AS ( - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity LIMIT 10) - SELECT count(*) FROM a -$$); -t
--- Test XML format -EXPLAIN (COSTS FALSE, FORMAT XML) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -<explain xmlns="http://www.postgresql.org/2009/explain"> - <Query> - <Plan> - <Node-Type>Sort</Node-Type> - <Parallel-Aware>false</Parallel-Aware> - <Async-Capable>false</Async-Capable> - <Sort-Key> - <Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item> - <Item>remote_scan.l_quantity</Item> - </Sort-Key> - <Plans> - <Plan> - <Node-Type>Aggregate</Node-Type> - <Strategy>Hashed</Strategy> - <Partial-Mode>Simple</Partial-Mode> - <Parent-Relationship>Outer</Parent-Relationship> - <Parallel-Aware>false</Parallel-Aware> - <Async-Capable>false</Async-Capable> - <Group-Key> - <Item>remote_scan.l_quantity</Item> - </Group-Key> - <Plans> - <Plan> - <Node-Type>Custom Scan</Node-Type> - <Parent-Relationship>Outer</Parent-Relationship> - <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider> - <Parallel-Aware>false</Parallel-Aware> - <Async-Capable>false</Async-Capable> - <Distributed-Query> - <Job> - <Task-Count>2</Task-Count> - <Tasks-Shown>One of 2</Tasks-Shown> - <Tasks> - <Task> - <Node>host=localhost port=xxxxx dbname=regression</Node> - <Remote-Plan> - <explain xmlns="http://www.postgresql.org/2009/explain"> - <Query> - <Plan> - <Node-Type>Aggregate</Node-Type> - <Strategy>Hashed</Strategy> - <Partial-Mode>Simple</Partial-Mode> - <Parallel-Aware>false</Parallel-Aware> - <Async-Capable>false</Async-Capable> - <Group-Key> - <Item>l_quantity</Item> - </Group-Key> - <Plans> - <Plan> - <Node-Type>Seq Scan</Node-Type> - <Parent-Relationship>Outer</Parent-Relationship> - <Parallel-Aware>false</Parallel-Aware> - <Async-Capable>false</Async-Capable> - <Relation-Name>lineitem_360000</Relation-Name> - <Alias>lineitem</Alias> - </Plan> - </Plans> - </Plan> - </Query> - </explain> - </Remote-Plan> - </Task> - </Tasks> - </Job> - </Distributed-Query> - </Plan> - </Plans> - </Plan> - </Plans> - </Plan> - </Query> - </explain>
--- Validate XML format -SELECT true AS valid FROM explain_xml($$ - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); -t -SELECT true AS valid FROM explain_analyze_xml($$ - WITH a AS ( - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity LIMIT 10) - SELECT count(*) FROM a -$$); -t
--- Test YAML format -EXPLAIN (COSTS FALSE, FORMAT YAML) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Plan: - Node Type: "Sort" - Parallel Aware: false - Async Capable: false - Sort Key: - - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))" - - "remote_scan.l_quantity" - Plans: - - Node Type: "Aggregate" - Strategy: "Hashed" - Partial Mode: "Simple" - Parent Relationship: "Outer" - Parallel Aware: false - Async Capable: false - Group Key: - - "remote_scan.l_quantity" - Plans: - - Node Type: "Custom Scan" - Parent Relationship: "Outer" - Custom Plan Provider: "Citus Adaptive" - Parallel Aware: false - Async Capable: false - Distributed Query: - Job: - Task Count: 2 - Tasks Shown: "One of 2" - Tasks: - - Node: "host=localhost port=xxxxx dbname=regression" - Remote Plan: - - Plan: - Node Type: "Aggregate" - Strategy: "Hashed" - Partial Mode: "Simple" - Parallel Aware: false - Async Capable: false - Group Key: - - "l_quantity" - Plans: - - Node Type: "Seq Scan" - Parent Relationship: "Outer" - Parallel Aware: false - Async Capable: false - Relation Name: "lineitem_360000" - Alias: "lineitem" -
--- Test Text format -EXPLAIN (COSTS FALSE, FORMAT TEXT) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -Sort - Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity - -> HashAggregate - Group Key: remote_scan.l_quantity - -> Custom Scan (Citus Adaptive) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: l_quantity - -> Seq Scan on lineitem_360000 lineitem
--- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -$Q$); -Sort (actual rows=50 loops=1) - Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity - Sort Method: quicksort Memory: xxx - -> HashAggregate (actual rows=50 loops=1) - Group Key: remote_scan.l_quantity - -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1) - Task Count: 2 - Tuple data received from nodes: 1800 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 900 bytes - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate (actual rows=50 loops=1) - Group Key: l_quantity - -> Seq Scan on lineitem_360000 lineitem (actual rows=5894 loops=1) --- EXPLAIN ANALYZE doesn't show worker tasks for repartition joins yet -SET citus.shard_count TO 3; -CREATE
TABLE t1(a int, b int); -CREATE TABLE t2(a int, b int); -SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a'); -| -BEGIN; -SET LOCAL citus.enable_repartition_joins TO true; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; -Aggregate (actual rows=1 loops=1) - -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 6 - Tuple data received from nodes: 48 bytes - Tasks Shown: None, not supported for re-partition queries - -> MapMergeJob - Map Task Count: 3 - Merge Task Count: 6 - -> MapMergeJob - Map Task Count: 3 - Merge Task Count: 6 --- Confirm repartiton join in distributed subplan works -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF) -WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b) -SELECT count(*) from repartition; -Custom Scan (Citus Adaptive) (actual rows=1 loops=1) - -> Distributed Subplan XXX_1 - Intermediate Data Size: 14 bytes - Result destination: Write locally - -> Aggregate (actual rows=1 loops=1) - -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 6 - Tuple data received from nodes: 48 bytes - Tasks Shown: None, not supported for re-partition queries - -> MapMergeJob - Map Task Count: 3 - Merge Task Count: 6 - -> MapMergeJob - Map Task Count: 3 - Merge Task Count: 6 - Task Count: 1 - Tuple data received from nodes: 8 bytes - Tasks Shown: All - -> Task - Tuple data received from node: 8 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate (actual rows=1 loops=1) - -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1) -END; -DROP TABLE t1, t2; --- Test query text output, with ANALYZE ON -SELECT public.plan_normalize_memory($Q$ -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE, BUFFERS OFF) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -$Q$); -Sort (actual rows=50 loops=1) - Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)) - Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity - Sort Method: quicksort Memory: xxx - -> HashAggregate (actual rows=50 loops=1) - Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint) - Group Key: remote_scan.l_quantity - -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1) - Output: remote_scan.l_quantity, remote_scan.count_quantity - Task Count: 2 - Tuple data received from nodes: 1800 bytes - Tasks Shown: One of 2 - -> Task - Query: SELECT l_quantity, count(*) AS count_quantity FROM public.lineitem_360000 lineitem WHERE true GROUP BY l_quantity - Tuple data received from node: 900 bytes - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate (actual rows=50 loops=1) - Output: l_quantity, count(*) - Group Key: lineitem.l_quantity - -> Seq Scan on public.lineitem_360000 lineitem (actual rows=5894 loops=1) - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment --- Test query text output, with ANALYZE OFF -EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, 
l_quantity; -Sort - Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)) - Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity - -> HashAggregate - Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint) - Group Key: remote_scan.l_quantity - -> Custom Scan (Citus Adaptive) - Output: remote_scan.l_quantity, remote_scan.count_quantity - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Query: SELECT l_quantity, count(*) AS count_quantity FROM public.lineitem_360000 lineitem WHERE true GROUP BY l_quantity - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: l_quantity, count(*) - Group Key: lineitem.l_quantity - -> Seq Scan on public.lineitem_360000 lineitem - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment --- Test verbose -EXPLAIN (COSTS FALSE, VERBOSE TRUE) - SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; -Aggregate - Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2"))) - -> Custom Scan (Citus Adaptive) - Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2" - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM public.lineitem_360000 lineitem WHERE true - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate - Output: sum(l_quantity), sum(l_quantity), count(l_quantity) - -> Seq Scan on public.lineitem_360000 lineitem - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment --- Test join -EXPLAIN (COSTS FALSE) - SELECT * FROM lineitem - JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5.0 - ORDER BY l_quantity LIMIT 10; -Limit - -> Sort - Sort Key: remote_scan.l_quantity - -> Custom Scan (Citus Adaptive) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Limit - -> Sort - Sort Key: lineitem.l_quantity - -> Hash Join - Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) - -> Seq Scan on lineitem_360000 lineitem - Filter: (l_quantity < 5.0) - -> Hash - -> Seq Scan on orders_360002 orders --- Test insert -EXPLAIN (COSTS FALSE) - INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0); -Custom Scan (Citus Adaptive) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Insert on lineitem_360000 citus_table_alias - -> Values Scan on "*VALUES*" --- Test update -EXPLAIN (COSTS FALSE) - UPDATE lineitem - SET l_suppkey = 12 - WHERE l_orderkey = 1 AND l_partkey = 0; -Custom Scan (Citus Adaptive) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Update on lineitem_360000 lineitem - -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem - Index Cond: (l_orderkey = 1) - Filter: (l_partkey = 0) --- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output) -BEGIN; -select public.explain_filter(' -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) - UPDATE lineitem - SET l_suppkey = 12 - WHERE 
l_orderkey = 1 AND l_partkey = 0 - '); -Custom Scan (Citus Adaptive) (actual rows=N loops=N) - Task Count: N - Tasks Shown: All - -> Task - Node: host=localhost port=N dbname=regression - -> Update on lineitem_360000 lineitem (actual rows=N loops=N) - -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N) - Index Cond: (l_orderkey = N) - Filter: (l_partkey = N) - Rows Removed by Filter: N -ROLLBACk; --- Test delete -EXPLAIN (COSTS FALSE) - DELETE FROM lineitem - WHERE l_orderkey = 1 AND l_partkey = 0; -Custom Scan (Citus Adaptive) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Delete on lineitem_360000 lineitem - -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem - Index Cond: (l_orderkey = 1) - Filter: (l_partkey = 0) --- Test zero-shard update -EXPLAIN (COSTS FALSE) - UPDATE lineitem - SET l_suppkey = 12 - WHERE l_orderkey = 1 AND l_orderkey = 0; -Custom Scan (Citus Adaptive) - Task Count: 0 - Tasks Shown: All --- Test zero-shard delete -EXPLAIN (COSTS FALSE) - DELETE FROM lineitem - WHERE l_orderkey = 1 AND l_orderkey = 0; -Custom Scan (Citus Adaptive) - Task Count: 0 - Tasks Shown: All --- Test single-shard SELECT -EXPLAIN (COSTS FALSE) - SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; -Custom Scan (Citus Adaptive) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem - Index Cond: (l_orderkey = 5) -SELECT true AS valid FROM explain_xml($$ - SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); -t -SELECT true AS valid FROM explain_json($$ - SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); -t --- Test CREATE TABLE ... 
AS -EXPLAIN (COSTS FALSE) - CREATE TABLE explain_result AS - SELECT * FROM lineitem; -Custom Scan (Citus Adaptive) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on lineitem_360000 lineitem --- Test having -EXPLAIN (COSTS FALSE, VERBOSE TRUE) - SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem - HAVING sum(l_quantity) > 100; -Aggregate - Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2"))) - Filter: (sum(remote_scan.worker_column_4) > '100'::numeric) - -> Custom Scan (Citus Adaptive) - Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4 - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) AS worker_column_4 FROM public.lineitem_360000 lineitem WHERE true - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate - Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) - -> Seq Scan on public.lineitem_360000 lineitem - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment --- Test having without aggregate -EXPLAIN (COSTS FALSE, VERBOSE TRUE) - SELECT l_quantity FROM lineitem - GROUP BY l_quantity - HAVING l_quantity > (100 * random()); -HashAggregate - Output: remote_scan.l_quantity - Group Key: remote_scan.l_quantity - Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random())) - -> Custom Scan (Citus Adaptive) - Output: remote_scan.l_quantity, remote_scan.worker_column_2 - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Query: SELECT l_quantity, l_quantity AS worker_column_2 FROM public.lineitem_360000 lineitem WHERE true GROUP BY l_quantity - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: l_quantity, l_quantity - Group Key: lineitem.l_quantity - -> Seq Scan on public.lineitem_360000 lineitem - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment --- Subquery pushdown tests with explain -EXPLAIN (COSTS OFF) -SELECT - avg(array_length(events, 1)) AS event_average -FROM - (SELECT - tenant_id, - user_id, - array_agg(event_type ORDER BY event_time) AS events - FROM - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - event_type, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type IN ('click', 'submit', 'pay')) AS subquery - GROUP BY - tenant_id, - user_id) AS subquery; -Aggregate - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate - -> GroupAggregate - Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) - -> Sort - Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) - -> Hash Join - Hash Cond: (users.composite_id = events.composite_id) - -> Seq Scan on users_1400289 users - Filter: ((composite_id >= 
'(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) - -> Hash - -> Seq Scan on events_1400285 events - Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[])) -SELECT success FROM run_command_on_workers('alter system set enable_nestloop to off'); -t -t -SELECT success FROM run_command_on_workers('alter system set enable_sort to off'); -t -t -SELECT success FROM run_command_on_workers('select pg_reload_conf()'); -t -t --- Union and left join subquery pushdown -EXPLAIN (COSTS OFF) -SELECT - avg(array_length(events, 1)) AS event_average, - hasdone -FROM - (SELECT - subquery_1.tenant_id, - subquery_1.user_id, - array_agg(event ORDER BY event_time) AS events, - COALESCE(hasdone, 'Has not done paying') AS hasdone - FROM - ( - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - (users.composite_id) as composite_id, - 'action=>1'AS event, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'click') - UNION - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - (users.composite_id) as composite_id, - 'action=>2'AS event, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'submit') - ) AS subquery_1 - LEFT JOIN - (SELECT - DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id, - (composite_id).tenant_id, - (composite_id).user_id, - 'Has done paying'::TEXT AS hasdone - FROM - events - WHERE - events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'pay') AS subquery_2 - ON - subquery_1.composite_id = subquery_2.composite_id - GROUP BY - subquery_1.tenant_id, - subquery_1.user_id, - hasdone) AS subquery_top -GROUP BY - hasdone; -HashAggregate - Group Key: remote_scan.hasdone - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: COALESCE(subquery_2.hasdone, 'Has not done paying'::text) - -> GroupAggregate - Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone - -> Sort - Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone - -> Hash Left Join - Hash Cond: (users.composite_id = subquery_2.composite_id) - -> HashAggregate - Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time - -> Append - -> Hash Join - Hash Cond: (users.composite_id = events.composite_id) - -> Seq Scan on users_1400289 users - Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) - -> Hash - -> Seq Scan on events_1400285 events - Filter: ((event_type)::text = 'click'::text) - -> Hash Join - Hash Cond: (users_1.composite_id = events_1.composite_id) - -> Seq Scan on users_1400289 users_1 - Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= 
'(1,9223372036854775807)'::user_composite_type)) - -> Hash - -> Seq Scan on events_1400285 events_1 - Filter: ((event_type)::text = 'submit'::text) - -> Hash - -> Subquery Scan on subquery_2 - -> Unique - -> Sort - Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id) - -> Seq Scan on events_1400285 events_2 - Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text)) --- Union, left join and having subquery pushdown -EXPLAIN (COSTS OFF) - SELECT - avg(array_length(events, 1)) AS event_average, - count_pay - FROM ( - SELECT - subquery_1.tenant_id, - subquery_1.user_id, - array_agg(event ORDER BY event_time) AS events, - COALESCE(count_pay, 0) AS count_pay - FROM - ( - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - (users.composite_id), - 'action=>1'AS event, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'click') - UNION - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - (users.composite_id), - 'action=>2'AS event, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'submit') - ) AS subquery_1 - LEFT JOIN - (SELECT - (composite_id).tenant_id, - (composite_id).user_id, - composite_id, - COUNT(*) AS count_pay - FROM - events - WHERE - events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'pay' - GROUP BY - composite_id - HAVING - COUNT(*) > 2) AS subquery_2 - ON - subquery_1.composite_id = subquery_2.composite_id - GROUP BY - subquery_1.tenant_id, - subquery_1.user_id, - count_pay) AS subquery_top -WHERE - array_ndims(events) > 0 -GROUP BY - count_pay -ORDER BY - count_pay; -Sort - Sort Key: remote_scan.count_pay - -> HashAggregate - Group Key: remote_scan.count_pay - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: COALESCE(subquery_2.count_pay, '0'::bigint) - -> GroupAggregate - Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay - Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0) - -> Sort - Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay - -> Hash Left Join - Hash Cond: (users.composite_id = subquery_2.composite_id) - -> HashAggregate - Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time - -> Append - -> Hash Join - Hash Cond: (users.composite_id = events.composite_id) - -> Seq Scan on users_1400289 users - Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) - -> Hash - -> Seq Scan on events_1400285 events - Filter: ((event_type)::text = 'click'::text) - -> Hash Join - Hash Cond: (users_1.composite_id = 
events_1.composite_id)
- -> Seq Scan on users_1400289 users_1
- Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
- -> Hash
- -> Seq Scan on events_1400285 events_1
- Filter: ((event_type)::text = 'submit'::text)
- -> Hash
- -> Subquery Scan on subquery_2
- -> HashAggregate
- Group Key: events_2.composite_id
- Filter: (count(*) > 2)
- -> Seq Scan on events_1400285 events_2
- Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
-SELECT success FROM run_command_on_workers('alter system reset enable_nestloop');
-t
-t
-SELECT success FROM run_command_on_workers('alter system reset enable_sort');
-t
-t
-SELECT success FROM run_command_on_workers('select pg_reload_conf()');
-t
-t
--- Lateral join subquery pushdown
--- set subquery_pushdown due to limit in the query
-SET citus.subquery_pushdown to ON;
-NOTICE: Setting citus.subquery_pushdown flag is discouraged becuase it forces the planner to pushdown certain queries, skipping relevant correctness checks.
-DETAIL: When enabled, the planner skips many correctness checks for subqueries and pushes down the queries to shards as-is. It means that the queries are likely to return wrong results unless the user is absolutely sure that pushing down the subquery is safe. This GUC is maintained only for backward compatibility, no new users are supposed to use it. The planner is capable of pushing down as much computation as possible to the shards depending on the query.
-EXPLAIN (COSTS OFF)
-SELECT
- tenant_id,
- user_id,
- user_lastseen,
- event_array
-FROM
- (SELECT
- tenant_id,
- user_id,
- max(lastseen) as user_lastseen,
- array_agg(event_type ORDER BY event_time) AS event_array
- FROM
- (SELECT
- (composite_id).tenant_id,
- (composite_id).user_id,
- composite_id,
- lastseen
- FROM
- users
- WHERE
- composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
- composite_id <= '(1, 9223372036854775807)'::user_composite_type
- ORDER BY
- lastseen DESC
- LIMIT
- 10
- ) AS subquery_top
- LEFT JOIN LATERAL
- (SELECT
- event_type,
- event_time
- FROM
- events
- WHERE
- (composite_id) = subquery_top.composite_id
- ORDER BY
- event_time DESC
- LIMIT
- 99) AS subquery_lateral
- ON
- true
- GROUP BY
- tenant_id,
- user_id
- ) AS shard_union
-ORDER BY
- user_lastseen DESC
-LIMIT
- 10;
-Limit
- -> Sort
- Sort Key: remote_scan.user_lastseen DESC
- -> Custom Scan (Citus Adaptive)
- Task Count: 4
- Tasks Shown: One of 4
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Limit
- -> Sort
- Sort Key: (max(users.lastseen)) DESC
- -> GroupAggregate
- Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
- -> Sort
- Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
- -> Nested Loop Left Join
- -> Limit
- -> Sort
- Sort Key: users.lastseen DESC
- -> Seq Scan on users_1400289 users
- Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
- -> Limit
- -> Sort
- Sort Key: events.event_time DESC
- -> Seq Scan on events_1400285 events
- Filter: (composite_id = users.composite_id)
-RESET citus.subquery_pushdown;
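
The lateral-join test above pushes down only because citus.subquery_pushdown is forced on; as the NOTICE says, that GUC skips the planner's correctness checks and survives purely for backward compatibility. A minimal sketch of the default, safe pattern — the table and columns below are hypothetical, not part of this test schema — is to keep the distribution column in every subquery so pushdown needs no GUC:

    -- hypothetical distributed table, for illustration only
    CREATE TABLE events_by_tenant (tenant_id int, event_time timestamptz, event_type text);
    SELECT create_distributed_table('events_by_tenant', 'tenant_id');

    -- grouping on the distribution column lets Citus push the whole
    -- aggregation to the shards without citus.subquery_pushdown
    EXPLAIN (COSTS OFF)
    SELECT tenant_id, count(*) FROM events_by_tenant GROUP BY tenant_id;
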
--- Test all tasks output
-SET citus.explain_all_tasks TO on;
-EXPLAIN (COSTS FALSE)
- SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
-Aggregate
- -> Custom Scan (Citus Adaptive)
- Task Count: 2
- Tasks Shown: All
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Aggregate
- -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
- Index Cond: (l_orderkey > 9030)
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Aggregate
- -> Index Only Scan using lineitem_pkey_360001 on lineitem_360001 lineitem
- Index Cond: (l_orderkey > 9030)
-SELECT true AS valid FROM explain_xml($$
- SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
-t
-SELECT true AS valid FROM explain_json($$
- SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
-t
--- Test multi shard update
-EXPLAIN (COSTS FALSE)
- UPDATE lineitem_hash_part
- SET l_suppkey = 12;
-Custom Scan (Citus Adaptive)
- Task Count: 4
- Tasks Shown: All
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Update on lineitem_hash_part_360041 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Update on lineitem_hash_part_360042 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Update on lineitem_hash_part_360043 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Update on lineitem_hash_part_360044 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
-EXPLAIN (COSTS FALSE)
- UPDATE lineitem_hash_part
- SET l_suppkey = 12
- WHERE l_orderkey = 1 OR l_orderkey = 3;
-Custom Scan (Citus Adaptive)
- Task Count: 2
- Tasks Shown: All
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Update on lineitem_hash_part_360041 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
- Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Update on lineitem_hash_part_360042 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
- Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
--- Test multi shard delete
-EXPLAIN (COSTS FALSE)
- DELETE FROM lineitem_hash_part;
-Custom Scan (Citus Adaptive)
- Task Count: 4
- Tasks Shown: All
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Delete on lineitem_hash_part_360041 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Delete on lineitem_hash_part_360042 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Delete on lineitem_hash_part_360043 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Delete on lineitem_hash_part_360044 lineitem_hash_part
- -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
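
citus.explain_all_tasks, toggled for the tests above, decides whether EXPLAIN prints one representative shard plan ("Tasks Shown: One of N") or all of them. A short sketch of the session-level usage:

    SET citus.explain_all_tasks TO on;   -- one subplan per shard task
    EXPLAIN (COSTS OFF) SELECT count(*) FROM lineitem;
    RESET citus.explain_all_tasks;       -- back to a single representative task

Printing every task is mostly useful when shards are skewed and one shard's plan differs from the rest.
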
--- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
-SELECT public.plan_normalize_memory($Q$
-EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF)
- SELECT l_quantity, count(*) count_quantity FROM lineitem
- GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
-$Q$);
-Sort (actual rows=50 loops=1)
- Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
- Sort Method: quicksort Memory: xxx
- -> HashAggregate (actual rows=50 loops=1)
- Group Key: remote_scan.l_quantity
- -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
- Task Count: 2
- Tuple data received from nodes: 1800 bytes
- Tasks Shown: All
- -> Task
- Tuple data received from node: 900 bytes
- Node: host=localhost port=xxxxx dbname=regression
- -> HashAggregate (actual rows=50 loops=1)
- Group Key: l_quantity
- -> Seq Scan on lineitem_360000 lineitem (actual rows=5894 loops=1)
- -> Task
- Tuple data received from node: 900 bytes
- Node: host=localhost port=xxxxx dbname=regression
- -> HashAggregate (actual rows=50 loops=1)
- Group Key: l_quantity
- -> Seq Scan on lineitem_360001 lineitem (actual rows=6106 loops=1)
-SET citus.explain_all_tasks TO off;
--- Test update with subquery
-EXPLAIN (COSTS FALSE)
- UPDATE lineitem_hash_part
- SET l_suppkey = 12
- FROM orders_hash_part
- WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
-Custom Scan (Citus Adaptive)
- Task Count: 4
- Tasks Shown: One of 4
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Update on lineitem_hash_part_360041 lineitem_hash_part
- -> Hash Join
- Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
- -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
- -> Hash
- -> Seq Scan on orders_hash_part_360045 orders_hash_part
--- Test delete with subquery
-EXPLAIN (COSTS FALSE)
- DELETE FROM lineitem_hash_part
- USING orders_hash_part
- WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
-Custom Scan (Citus Adaptive)
- Task Count: 4
- Tasks Shown: One of 4
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Delete on lineitem_hash_part_360041 lineitem_hash_part
- -> Hash Join
- Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
- -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
- -> Hash
- -> Seq Scan on orders_hash_part_360045 orders_hash_part
--- Test task tracker
-EXPLAIN (COSTS FALSE)
- SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
-Aggregate
- -> Custom Scan (Citus Adaptive)
- Task Count: 2
- Tasks Shown: One of 2
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Aggregate
- -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
- Index Cond: (l_orderkey > 9030)
--- Test re-partition join
-EXPLAIN (COSTS FALSE)
- SELECT count(*)
- FROM lineitem, orders, customer_append, supplier_single_shard
- WHERE l_orderkey = o_orderkey
- AND o_custkey = c_custkey
- AND l_suppkey = s_suppkey;
-Aggregate
- -> Custom Scan (Citus Adaptive)
- Task Count: 6
- Tasks Shown: None, not supported for re-partition queries
- -> MapMergeJob
- Map Task Count: 6
- Merge Task Count: 6
- -> MapMergeJob
- Map Task Count: 2
- Merge Task Count: 6
- -> MapMergeJob
- Map Task Count: 1
- Merge Task Count: 6
- -> MapMergeJob
- Map Task Count: 1
- Merge Task Count: 6
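
For joins that are not on the distribution columns, Citus repartitions the inputs first, so EXPLAIN reports MapMergeJob dependencies and no per-task plans. A minimal sketch, assuming the citus.enable_repartition_joins GUC is available in this build:

    SET citus.enable_repartition_joins TO on;
    -- l_suppkey and o_custkey are not the distribution keys, so Citus
    -- adds Map/Merge jobs to shuffle both sides on the join column
    EXPLAIN (COSTS OFF)
    SELECT count(*) FROM lineitem, orders WHERE l_suppkey = o_custkey;
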
-EXPLAIN (COSTS FALSE, FORMAT JSON)
- SELECT count(*)
- FROM lineitem, orders, customer_append, supplier_single_shard
- WHERE l_orderkey = o_orderkey
- AND o_custkey = c_custkey
- AND l_suppkey = s_suppkey;
-[
- {
- "Plan": {
- "Node Type": "Aggregate",
- "Strategy": "Plain",
- "Partial Mode": "Simple",
- "Parallel Aware": false,
- "Async Capable": false,
- "Plans": [
- {
- "Node Type": "Custom Scan",
- "Parent Relationship": "Outer",
- "Custom Plan Provider": "Citus Adaptive",
- "Parallel Aware": false,
- "Async Capable": false,
- "Distributed Query": {
- "Job": {
- "Task Count": 6,
- "Tasks Shown": "None, not supported for re-partition queries",
- "Dependent Jobs": [
- {
- "Map Task Count": 6,
- "Merge Task Count": 6,
- "Dependent Jobs": [
- {
- "Map Task Count": 2,
- "Merge Task Count": 6
- },
- {
- "Map Task Count": 1,
- "Merge Task Count": 6
- }
- ]
- },
- {
- "Map Task Count": 1,
- "Merge Task Count": 6
- }
- ]
- }
- }
- }
- ]
- }
- }
-]
-SELECT true AS valid FROM explain_json($$
- SELECT count(*)
- FROM lineitem, orders, customer_append, supplier_single_shard
- WHERE l_orderkey = o_orderkey
- AND o_custkey = c_custkey
- AND l_suppkey = s_suppkey$$);
-t
-EXPLAIN (COSTS FALSE, FORMAT XML)
- SELECT count(*)
- FROM lineitem, orders, customer_append, supplier_single_shard
- WHERE l_orderkey = o_orderkey
- AND o_custkey = c_custkey
- AND l_suppkey = s_suppkey;
-
-
-
- Aggregate
- Plain
- Simple
- false
- false
-
-
- Custom Scan
- Outer
- Citus Adaptive
- false
- false
-
-
- 6
- None, not supported for re-partition queries
-
-
- 6
- 6
-
-
- 2
- 6
-
-
- 1
- 6
-
-
-
-
- 1
- 6
-
-
-
-
-
-
-
-
-
-SELECT true AS valid FROM explain_xml($$
- SELECT count(*)
- FROM lineitem, orders, customer_append, supplier
- WHERE l_orderkey = o_orderkey
- AND o_custkey = c_custkey
- AND l_suppkey = s_suppkey$$);
-t
--- make sure that EXPLAIN works without
--- problems for queries that involve only
--- reference tables
-SELECT true AS valid FROM explain_xml($$
- SELECT count(*)
- FROM nation
- WHERE n_name = 'CHINA'$$);
-t
-SELECT true AS valid FROM explain_xml($$
- SELECT count(*)
- FROM nation, supplier
- WHERE nation.n_nationkey = supplier.s_nationkey$$);
-t
-EXPLAIN (COSTS FALSE, FORMAT YAML)
- SELECT count(*)
- FROM lineitem, orders, customer, supplier_single_shard
- WHERE l_orderkey = o_orderkey
- AND o_custkey = c_custkey
- AND l_suppkey = s_suppkey;
-- Plan:
- Node Type: "Aggregate"
- Strategy: "Plain"
- Partial Mode: "Simple"
- Parallel Aware: false
- Async Capable: false
- Plans:
- - Node Type: "Custom Scan"
- Parent Relationship: "Outer"
- Custom Plan Provider: "Citus Adaptive"
- Parallel Aware: false
- Async Capable: false
- Distributed Query:
- Job:
- Task Count: 6
- Tasks Shown: "None, not supported for re-partition queries"
- Dependent Jobs:
- - Map Task Count: 2
- Merge Task Count: 6
- - Map Task Count: 1
- Merge Task Count: 6
--- ensure local plans display correctly
-CREATE TABLE lineitem_clone (LIKE lineitem);
-EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone;
-Aggregate
- -> Seq Scan on lineitem_clone
-DROP TABLE lineitem_clone;
--- ensure distributed plans don't break
-EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem;
-Aggregate
- -> Custom Scan (Citus Adaptive)
- Task Count: 2
- Tasks Shown: One of 2
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Aggregate
- -> Seq Scan on lineitem_360000 lineitem
--- ensure EXPLAIN EXECUTE doesn't crash
-PREPARE task_tracker_query AS
- SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
-EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query;
-Aggregate
- -> Custom Scan (Citus Adaptive)
- Task Count: 2
- Tasks Shown: One of 2
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Aggregate
- -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
- Index Cond: (l_orderkey > 9030)
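
EXPLAIN EXECUTE goes through the same distributed planner as the underlying statement, which the next tests exercise with and without parameters. The basic pattern, using this file's lineitem table:

    PREPARE q(bigint) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1;
    EXPLAIN (COSTS OFF) EXECUTE q(5);  -- the parameter prunes to a single shard: a router plan
    DEALLOCATE q;
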
-PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
-EXPLAIN EXECUTE router_executor_query;
-Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
- Task Count: 1
- Tasks Shown: All
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
- Index Cond: (l_orderkey = 5)
-PREPARE real_time_executor_query AS
- SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
-EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query;
-Aggregate
- -> Custom Scan (Citus Adaptive)
- Task Count: 2
- Tasks Shown: One of 2
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Aggregate
- -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
- Index Cond: (l_orderkey > 9030)
--- EXPLAIN EXECUTE of parametrized prepared statements is broken, but
--- at least make sure to fail without crashing
-PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1;
-EXPLAIN EXECUTE router_executor_query_param(5);
-Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
- Task Count: 1
- Tasks Shown: All
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
- Index Cond: (l_orderkey = 5)
-select public.explain_filter('EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE router_executor_query_param(5)');
-Custom Scan (Citus Adaptive) (actual rows=N loops=N)
- Task Count: N
- Tuple data received from nodes: N bytes
- Tasks Shown: All
- -> Task
- Tuple data received from node: N bytes
- Node: host=localhost port=N dbname=regression
- -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=N loops=N)
- Index Cond: (l_orderkey = N)
-\set VERBOSITY TERSE
-PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;
-BEGIN;
-EXPLAIN (COSTS OFF) EXECUTE multi_shard_query_param(5);
-Custom Scan (Citus Adaptive)
- Task Count: 2
- Tasks Shown: One of 2
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Update on lineitem_360000 lineitem
- -> Seq Scan on lineitem_360000 lineitem
-ROLLBACK;
-BEGIN;
-EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) EXECUTE multi_shard_query_param(5);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
- Task Count: 2
- Tasks Shown: One of 2
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Update on lineitem_360000 lineitem (actual rows=0 loops=1)
- -> Seq Scan on lineitem_360000 lineitem (actual rows=5894 loops=1)
-ROLLBACK;
-\set VERBOSITY DEFAULT
--- test explain in a transaction with alter table to test we use right connections
-BEGIN;
-CREATE TABLE explain_table(id int);
-SELECT create_distributed_table('explain_table', 'id');
-
-ALTER TABLE explain_table ADD COLUMN value int;
-ROLLBACK;
--- test explain with local INSERT ... SELECT
-EXPLAIN (COSTS OFF)
-INSERT INTO lineitem_hash_part
-SELECT o_orderkey FROM orders_hash_part LIMIT 3;
-Custom Scan (Citus INSERT ... SELECT)
- INSERT/SELECT method: pull to coordinator
- -> Limit
- -> Custom Scan (Citus Adaptive)
- Task Count: 4
- Tasks Shown: One of 4
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Limit
- -> Seq Scan on orders_hash_part_360045 orders_hash_part
-SELECT true AS valid FROM explain_json($$
- INSERT INTO lineitem_hash_part (l_orderkey)
- SELECT o_orderkey FROM orders_hash_part LIMIT 3;
-$$);
-t
-EXPLAIN (COSTS OFF)
-INSERT INTO lineitem_hash_part (l_orderkey, l_quantity)
-SELECT o_orderkey, 5 FROM orders_hash_part LIMIT 3;
-Custom Scan (Citus INSERT ...
SELECT) - INSERT/SELECT method: pull to coordinator - -> Limit - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Limit - -> Seq Scan on orders_hash_part_360045 orders_hash_part -EXPLAIN (COSTS OFF) -INSERT INTO lineitem_hash_part (l_orderkey) -SELECT s FROM generate_series(1,5) s; -Custom Scan (Citus INSERT ... SELECT) - INSERT/SELECT method: pull to coordinator - -> Function Scan on generate_series s --- WHERE EXISTS forces pg12 to materialize cte -SELECT public.explain_with_pg17_initplan_format($Q$ -EXPLAIN (COSTS OFF) -WITH cte1 AS (SELECT s FROM generate_series(1,10) s) -INSERT INTO lineitem_hash_part -WITH cte1 AS (SELECT * FROM cte1 WHERE EXISTS (SELECT * FROM cte1) LIMIT 5) -SELECT s FROM cte1 WHERE EXISTS (SELECT * FROM cte1); -$Q$); -Custom Scan (Citus INSERT ... SELECT) - INSERT/SELECT method: pull to coordinator - -> Result - One-Time Filter: (InitPlan 4).col1 - CTE cte1 - -> Function Scan on generate_series s - CTE cte1 - -> Limit - InitPlan 2 - -> CTE Scan on cte1 cte1_1 - -> Result - One-Time Filter: (InitPlan 2).col1 - -> CTE Scan on cte1 cte1_2 - InitPlan 4 - -> CTE Scan on cte1 cte1_3 - -> CTE Scan on cte1 -EXPLAIN (COSTS OFF) -INSERT INTO lineitem_hash_part -( SELECT s FROM generate_series(1,5) s) UNION -( SELECT s FROM generate_series(5,10) s); -Custom Scan (Citus INSERT ... SELECT) - INSERT/SELECT method: pull to coordinator - -> Subquery Scan on citus_insert_select_subquery - -> HashAggregate - Group Key: s.s - -> Append - -> Function Scan on generate_series s - -> Function Scan on generate_series s_1 --- explain with recursive planning -EXPLAIN (COSTS OFF, VERBOSE true) -WITH keys AS MATERIALIZED ( - SELECT DISTINCT l_orderkey FROM lineitem_hash_part -), -series AS MATERIALIZED ( - SELECT s FROM generate_series(1,10) s -) -SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey) -ORDER BY s; -Custom Scan (Citus Adaptive) - Output: remote_scan.l_orderkey - -> Distributed Subplan XXX_1 - -> HashAggregate - Output: remote_scan.l_orderkey - Group Key: remote_scan.l_orderkey - -> Custom Scan (Citus Adaptive) - Output: remote_scan.l_orderkey - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT DISTINCT l_orderkey FROM public.lineitem_hash_part_360041 lineitem_hash_part WHERE true - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: l_orderkey - Group Key: lineitem_hash_part.l_orderkey - -> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment - -> Distributed Subplan XXX_2 - -> Function Scan on pg_catalog.generate_series s - Output: s - Function Call: generate_series(1, 10) - Task Count: 1 - Tasks Shown: All - -> Task - Query: SELECT keys.l_orderkey FROM ((SELECT intermediate_result.s FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(s integer)) series JOIN (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) keys ON ((series.s OPERATOR(pg_catalog.=) keys.l_orderkey))) ORDER BY series.s - Node: host=localhost port=xxxxx dbname=regression - -> Merge Join - Output: intermediate_result_1.l_orderkey, intermediate_result.s - Merge Cond: (intermediate_result.s = 
intermediate_result_1.l_orderkey) - -> Sort - Output: intermediate_result.s - Sort Key: intermediate_result.s - -> Function Scan on pg_catalog.read_intermediate_result intermediate_result - Output: intermediate_result.s - Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) - -> Sort - Output: intermediate_result_1.l_orderkey - Sort Key: intermediate_result_1.l_orderkey - -> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1 - Output: intermediate_result_1.l_orderkey - Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) -SELECT true AS valid FROM explain_json($$ - WITH result AS ( - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity - ), - series AS ( - SELECT s FROM generate_series(1,10) s - ) - SELECT * FROM result JOIN series ON (s = count_quantity) JOIN orders_hash_part ON (s = o_orderkey) -$$); -t -SELECT true AS valid FROM explain_xml($$ - WITH result AS ( - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity - ), - series AS ( - SELECT s FROM generate_series(1,10) s - ) - SELECT * FROM result JOIN series ON (s = l_quantity) JOIN orders_hash_part ON (s = o_orderkey) -$$); -t --- --- Test EXPLAIN ANALYZE udfs --- -\a\t -\set default_opts '''{"costs": false, "timing": false, "summary": false}'''::jsonb -CREATE TABLE explain_analyze_test(a int, b text); -INSERT INTO explain_analyze_test VALUES (1, 'value 1'), (2, 'value 2'), (3, 'value 3'), (4, 'value 4'); --- simple select -BEGIN; -SELECT * FROM worker_save_query_explain_analyze('SELECT 1', :default_opts) as (a int); - a ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - Result (actual rows=1 loops=1)+ - -(1 row) - -END; --- insert into select -BEGIN; -SELECT * FROM worker_save_query_explain_analyze($Q$ - INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i $Q$, - :default_opts) as (a int); - a ---------------------------------------------------------------------- -(0 rows) - -SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - Insert on explain_analyze_test (actual rows=0 loops=1) + - -> Function Scan on generate_series i (actual rows=5 loops=1)+ - -(1 row) - -ROLLBACK; --- select from table -BEGIN; -SELECT * FROM worker_save_query_explain_analyze($Q$SELECT * FROM explain_analyze_test$Q$, - :default_opts) as (a int, b text); - a | b ---------------------------------------------------------------------- - 1 | value 1 - 2 | value 2 - 3 | value 3 - 4 | value 4 -(4 rows) - -SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - Seq Scan on explain_analyze_test (actual rows=4 loops=1)+ - -(1 row) - -ROLLBACK; --- insert into with returning -BEGIN; -SELECT * FROM worker_save_query_explain_analyze($Q$ - INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i - RETURNING a, b$Q$, - :default_opts) as (a int, b text); - a | b ---------------------------------------------------------------------- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 -(5 rows) - -SELECT 
explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - Insert on explain_analyze_test (actual rows=5 loops=1) + - -> Function Scan on generate_series i (actual rows=5 loops=1)+ - -(1 row) - -ROLLBACK; --- delete with returning -BEGIN; -SELECT * FROM worker_save_query_explain_analyze($Q$ - DELETE FROM explain_analyze_test WHERE a % 2 = 0 - RETURNING a, b$Q$, - :default_opts) as (a int, b text); - a | b ---------------------------------------------------------------------- - 2 | value 2 - 4 | value 4 -(2 rows) - -SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - Delete on explain_analyze_test (actual rows=2 loops=1) + - -> Seq Scan on explain_analyze_test (actual rows=2 loops=1)+ - Filter: ((a % 2) = 0) + - Rows Removed by Filter: 2 + - -(1 row) - -ROLLBACK; --- delete without returning -BEGIN; -SELECT * FROM worker_save_query_explain_analyze($Q$ - DELETE FROM explain_analyze_test WHERE a % 2 = 0$Q$, - :default_opts) as (a int); - a ---------------------------------------------------------------------- -(0 rows) - -SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - Delete on explain_analyze_test (actual rows=0 loops=1) + - -> Seq Scan on explain_analyze_test (actual rows=2 loops=1)+ - Filter: ((a % 2) = 0) + - Rows Removed by Filter: 2 + - -(1 row) - -ROLLBACK; --- multiple queries (should ERROR) -SELECT * FROM worker_save_query_explain_analyze('SELECT 1; SELECT 2', :default_opts) as (a int); -ERROR: cannot EXPLAIN ANALYZE multiple queries --- error in query -SELECT * FROM worker_save_query_explain_analyze('SELECT x', :default_opts) as (a int); -ERROR: column "x" does not exist --- error in format string -SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "invlaid_format"}') as (a int); -ERROR: Invalid explain analyze format: "invlaid_format" --- test formats -BEGIN; -SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "text", "costs": false}') as (a int); - a ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - Result (actual rows=1 loops=1)+ - -(1 row) - -SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "json", "costs": false}') as (a int); - a ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - [ + - { + - "Plan": { + - "Node Type": "Result", + - "Parallel Aware": false,+ - "Async Capable": false, + - "Actual Rows": 1, + - "Actual Loops": 1 + - }, + - "Triggers": [ + - ] + - } + - ] -(1 row) - -SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "xml", "costs": false}') as (a int); - a ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - + - + - + - Result + - 
false + - false + - 1 + - 1 + - + - + - + - + - -(1 row) - -SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "yaml", "costs": false}') as (a int); - a ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - explain_analyze_output ---------------------------------------------------------------------- - - Plan: + - Node Type: "Result" + - Parallel Aware: false+ - Async Capable: false + - Actual Rows: 1 + - Actual Loops: 1 + - Triggers: -(1 row) - -END; --- costs on, timing off -BEGIN; -SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": true}') as (a int); - a ---------------------------------------------------------------------- - 1 - 2 - 3 - 4 -(4 rows) - -SELECT explain_analyze_output ~ 'Seq Scan.*\(cost=0.00.*\) \(actual rows.*\)' FROM worker_last_saved_explain_analyze(); - ?column? ---------------------------------------------------------------------- - t -(1 row) - -END; --- costs off, timing on -BEGIN; -SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": true, "costs": false}') as (a int); - a ---------------------------------------------------------------------- - 1 - 2 - 3 - 4 -(4 rows) - -SELECT explain_analyze_output ~ 'Seq Scan on explain_analyze_test \(actual time=.* rows=.* loops=1\)' FROM worker_last_saved_explain_analyze(); - ?column? ---------------------------------------------------------------------- - t -(1 row) - -END; --- summary on -BEGIN; -SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"timing": false, "costs": false, "summary": true}') as (a int); - a ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT explain_analyze_output ~ 'Planning Time:.*Execution Time:.*' FROM worker_last_saved_explain_analyze(); - ?column? ---------------------------------------------------------------------- - t -(1 row) - -END; --- buffers on -BEGIN; -SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "buffers": true}') as (a int); - a ---------------------------------------------------------------------- - 1 - 2 - 3 - 4 -(4 rows) - -SELECT explain_analyze_output ~ 'Buffers:' FROM worker_last_saved_explain_analyze(); - ?column? ---------------------------------------------------------------------- - t -(1 row) - -END; --- verbose on -BEGIN; -SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "verbose": true}') as (a int); - a ---------------------------------------------------------------------- - 1 - 2 - 3 - 4 -(4 rows) - -SELECT explain_analyze_output ~ 'Output: a, b' FROM worker_last_saved_explain_analyze(); - ?column? 
---------------------------------------------------------------------- - t -(1 row) - -END; --- make sure deleted at transaction end -SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{}') as (a int); - a ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT count(*) FROM worker_last_saved_explain_analyze(); - count ---------------------------------------------------------------------- - 0 -(1 row) - --- should be deleted at the end of prepare commit -BEGIN; -SELECT * FROM worker_save_query_explain_analyze('UPDATE explain_analyze_test SET a=6 WHERE a=4', '{}') as (a int); - a ---------------------------------------------------------------------- -(0 rows) - -SELECT count(*) FROM worker_last_saved_explain_analyze(); - count ---------------------------------------------------------------------- - 1 -(1 row) - -PREPARE TRANSACTION 'citus_0_1496350_7_0'; -SELECT count(*) FROM worker_last_saved_explain_analyze(); - count ---------------------------------------------------------------------- - 0 -(1 row) - -COMMIT PREPARED 'citus_0_1496350_7_0'; --- verify execution time makes sense -BEGIN; -SELECT count(*) FROM worker_save_query_explain_analyze('SELECT pg_sleep(0.05)', :default_opts) as (a int); - count ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT execution_duration BETWEEN 30 AND 200 FROM worker_last_saved_explain_analyze(); - ?column? ---------------------------------------------------------------------- - t -(1 row) - -END; --- --- verify we handle parametrized queries properly --- -CREATE TABLE t(a int); -INSERT INTO t VALUES (1), (2), (3); --- simple case -PREPARE save_explain AS -SELECT $1, * FROM worker_save_query_explain_analyze('SELECT $1::int', :default_opts) as (a int); -EXECUTE save_explain(1); - ?column? | a ---------------------------------------------------------------------- - 1 | 1 -(1 row) - -deallocate save_explain; --- Call a UDF first to make sure that we handle stacks of executorBoundParams properly. --- --- The prepared statement will first call f() which will force new executor run with new --- set of parameters. Then it will call worker_save_query_explain_analyze with a --- parametrized query. If we don't have the correct set of parameters here, it will fail. -CREATE FUNCTION f() RETURNS INT -AS $$ -PREPARE pp1 AS SELECT $1 WHERE $2 = $3; -EXECUTE pp1(4, 5, 5); -deallocate pp1; -SELECT 1$$ LANGUAGE sql volatile; -PREPARE save_explain AS - SELECT $1, CASE WHEN i < 2 THEN - f() = 1 - ELSE - EXISTS(SELECT * FROM worker_save_query_explain_analyze('SELECT $1::int', :default_opts) as (a int) - WHERE a = 1) - END - FROM generate_series(1, 4) i; -EXECUTE save_explain(1); - ?column? 
| exists ---------------------------------------------------------------------- - 1 | t - 1 | t - 1 | t - 1 | t -(4 rows) - -deallocate save_explain; -DROP FUNCTION f(); -DROP TABLE t; -SELECT * FROM explain_analyze_test ORDER BY a; - a | b ---------------------------------------------------------------------- - 1 | value 1 - 2 | value 2 - 3 | value 3 - 6 | value 4 -(4 rows) - -\a\t --- --- Test different cases of EXPLAIN ANALYZE --- -SET citus.shard_count TO 4; -SET client_min_messages TO WARNING; -SELECT create_distributed_table('explain_analyze_test', 'a'); - -\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)' -\set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)' --- router SELECT -EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1; -Custom Scan (Citus Adaptive) (actual rows=1 loops=1) - Task Count: 1 - Tuple data received from nodes: 11 bytes - Tasks Shown: All - -> Task - Tuple data received from node: 11 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1) - Filter: (a = 1) --- multi-shard SELECT -EXPLAIN :default_analyze_flags SELECT count(*) FROM explain_analyze_test; -Aggregate (actual rows=1 loops=1) - -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1) - Task Count: 4 - Tuple data received from nodes: 32 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 8 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate (actual rows=1 loops=1) - -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1) --- empty router SELECT -EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 10000; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 1 - Tuple data received from nodes: 0 bytes - Tasks Shown: All - -> Task - Tuple data received from node: 0 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1) - Filter: (a = 10000) - Rows Removed by Filter: 1 --- empty multi-shard SELECT -EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE b = 'does not exist'; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 4 - Tuple data received from nodes: 0 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 0 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1) - Filter: (b = 'does not exist'::text) - Rows Removed by Filter: 1 --- router DML -BEGIN; -EXPLAIN :default_analyze_flags DELETE FROM explain_analyze_test WHERE a = 1; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Delete on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1) - -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1) - Filter: (a = 1) -EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'b' WHERE a = 2; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Update on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1) - -> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=1 loops=1) - 
Filter: (a = 2) -SELECT * FROM explain_analyze_test ORDER BY a; -2|b -3|value 3 -6|value 4 -ROLLBACK; --- multi-shard DML -BEGIN; -EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'b' WHERE a IN (1, 2); -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Update on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1) - -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1) - Filter: (a = ANY ('{1,2}'::integer[])) -EXPLAIN :default_analyze_flags DELETE FROM explain_analyze_test; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Delete on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1) - -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1) -SELECT * FROM explain_analyze_test ORDER BY a; -ROLLBACK; --- router DML with RETURNING with empty result -EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'something' WHERE a = 10000 RETURNING *; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 1 - Tuple data received from nodes: 0 bytes - Tasks Shown: All - -> Task - Tuple data received from node: 0 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Update on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1) - -> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1) - Filter: (a = 10000) - Rows Removed by Filter: 1 --- multi-shard DML with RETURNING with empty result -EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'something' WHERE b = 'does not exist' RETURNING *; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 4 - Tuple data received from nodes: 0 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 0 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Update on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1) - -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1) - Filter: (b = 'does not exist'::text) - Rows Removed by Filter: 1 --- single-row insert -BEGIN; -EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test VALUES (5, 'value 5'); -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Insert on explain_analyze_test_570009 (actual rows=0 loops=1) - -> Result (actual rows=1 loops=1) -ROLLBACK; --- multi-row insert -BEGIN; -EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test VALUES (5, 'value 5'), (6, 'value 6'); -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Insert on explain_analyze_test_570009 citus_table_alias (actual rows=0 loops=1) - -> Result (actual rows=1 loops=1) -ROLLBACK; --- distributed insert/select -BEGIN; -EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test SELECT * FROM explain_analyze_test; -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Insert on explain_analyze_test_570009 citus_table_alias (actual rows=0 loops=1) - -> Seq Scan on 
explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1) - Filter: (a IS NOT NULL) -ROLLBACK; -DROP TABLE explain_analyze_test; --- test EXPLAIN ANALYZE works fine with primary keys -CREATE TABLE explain_pk(a int primary key, b int); -SELECT create_distributed_table('explain_pk', 'a'); - -BEGIN; -EXPLAIN :default_analyze_flags INSERT INTO explain_pk VALUES (1, 2), (2, 3); -Custom Scan (Citus Adaptive) (actual rows=0 loops=1) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Insert on explain_pk_570013 citus_table_alias (actual rows=0 loops=1) - -> Result (actual rows=1 loops=1) -SELECT * FROM explain_pk ORDER BY 1; -1|2 -2|3 -ROLLBACK; --- test EXPLAIN ANALYZE with non-text output formats -BEGIN; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); -- Plan: - Node Type: "Custom Scan" - Custom Plan Provider: "Citus Adaptive" - Parallel Aware: false - Async Capable: false - Actual Rows: 0 - Actual Loops: 1 - Distributed Query: - Job: - Task Count: 2 - Tasks Shown: "One of 2" - Tasks: - - Node: "host=localhost port=xxxxx dbname=regression" - Remote Plan: - - Plan: - Node Type: "ModifyTable" - Operation: "Insert" - Parallel Aware: false - Async Capable: false - Relation Name: "explain_pk_570013" - Alias: "citus_table_alias" - Actual Rows: 0 - Actual Loops: 1 - Plans: - - Node Type: "Result" - Parent Relationship: "Outer" - Parallel Aware: false - Async Capable: false - Actual Rows: 1 - Actual Loops: 1 - Triggers: - - Triggers: -ROLLBACK; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT YAML, BUFFERS OFF) SELECT * FROM explain_pk; -- Plan: - Node Type: "Custom Scan" - Custom Plan Provider: "Citus Adaptive" - Parallel Aware: false - Async Capable: false - Actual Rows: 0 - Actual Loops: 1 - Distributed Query: - Job: - Task Count: 4 - Tuple data received from nodes: "0 bytes" - Tasks Shown: "One of 4" - Tasks: - - Tuple data received from node: "0 bytes" - Node: "host=localhost port=xxxxx dbname=regression" - Remote Plan: - - Plan: - Node Type: "Seq Scan" - Parallel Aware: false - Async Capable: false - Relation Name: "explain_pk_570013" - Alias: "explain_pk" - Actual Rows: 0 - Actual Loops: 1 - Triggers: - - Triggers: -BEGIN; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) INSERT INTO explain_pk VALUES (1, 2), (2, 3); - - - - Custom Scan - Citus Adaptive - false - false - 0 - 1 - - - 2 - One of 2 - - - host=localhost port=xxxxx dbname=regression - - - - - ModifyTable - Insert - false - false - explain_pk_570013 - citus_table_alias - 0 - 1 - - - Result - Outer - false - false - 1 - 1 - - - - - - - - - - - - - - - - - -ROLLBACK; -EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML, BUFFERS OFF) SELECT * FROM explain_pk; - - - - Custom Scan - Citus Adaptive - false - false - 0 - 1 - - - 4 - 0 bytes - One of 4 - - - 0 bytes - host=localhost port=xxxxx dbname=regression - - - - - Seq Scan - false - false - explain_pk_570013 - explain_pk - 0 - 1 - - - - - - - - - - - - - - - -DROP TABLE explain_pk; --- test EXPLAIN ANALYZE with CTEs and subqueries -CREATE TABLE dist_table(a int, b int); -SELECT create_distributed_table('dist_table', 'a'); - -CREATE TABLE ref_table(a int); -SELECT create_reference_table('ref_table'); - -INSERT INTO dist_table SELECT i, i*i FROM generate_series(1, 10) i; -INSERT INTO ref_table SELECT i FROM generate_series(1, 10) i; -EXPLAIN :default_analyze_flags -WITH r AS 
( - SELECT GREATEST(random(), 2) r, a FROM dist_table -) -SELECT count(distinct a) from r NATURAL JOIN ref_table; -Custom Scan (Citus Adaptive) (actual rows=1 loops=1) - -> Distributed Subplan XXX_1 - Intermediate Data Size: 220 bytes - Result destination: Send to 3 nodes - -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1) - Task Count: 4 - Tuple data received from nodes: 120 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 48 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1) - Task Count: 1 - Tuple data received from nodes: 8 bytes - Tasks Shown: All - -> Task - Tuple data received from node: 8 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate (actual rows=1 loops=1) - -> Hash Join (actual rows=10 loops=1) - Hash Cond: (ref_table.a = intermediate_result.a) - -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1) - -> Hash (actual rows=10 loops=1) - -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1) -EXPLAIN :default_analyze_flags -SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table; -Aggregate (actual rows=1 loops=1) - -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1) - Task Count: 4 - Tuple data received from nodes: 32 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 8 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate (actual rows=1 loops=1) - -> Merge Join (actual rows=4 loops=1) - Merge Cond: (t.a = ref_table.a) - -> Sort (actual rows=4 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: 25kB - -> Subquery Scan on t (actual rows=4 loops=1) - -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1) - -> Sort (actual rows=10 loops=1) - Sort Key: ref_table.a - Sort Method: quicksort Memory: 25kB - -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1) -SELECT public.explain_with_pg17_initplan_format($Q$ -EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS OFF) -SELECT count(distinct a) FROM dist_table -WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table); -$Q$); -Aggregate (actual rows=1 loops=1) - -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1) - -> Distributed Subplan XXX_1 - Intermediate Data Size: 70 bytes - Result destination: Send to 2 nodes - -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1) - Task Count: 4 - Tuple data received from nodes: 10 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 4 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Merge Join (actual rows=4 loops=1) - Merge Cond: (dist_table.a = ref_table.a) - -> Sort (actual rows=4 loops=1) - Sort Key: dist_table.a - Sort Method: quicksort Memory: 25kB - -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1) - -> Sort (actual rows=10 loops=1) - Sort Key: ref_table.a - Sort Method: quicksort Memory: 25kB - -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1) - Task Count: 4 - Tuple data received from nodes: 32 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 8 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate (actual rows=1 loops=1) - InitPlan 1 - -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1) - -> Result (actual rows=4 loops=1) - One-Time Filter: (InitPlan 1).col1 - -> Seq Scan on dist_table_570017 dist_table 
(actual rows=4 loops=1) -BEGIN; -EXPLAIN :default_analyze_flags -WITH r AS ( - INSERT INTO dist_table SELECT a, a * a FROM dist_table - RETURNING a -), s AS ( - SELECT random() < 2, a * a a2 FROM r -) -SELECT count(distinct a2) FROM s; -Custom Scan (Citus Adaptive) (actual rows=1 loops=1) - -> Distributed Subplan XXX_1 - Intermediate Data Size: 100 bytes - Result destination: Write locally - -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1) - Task Count: 4 - Tuple data received from nodes: 80 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 32 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Insert on dist_table_570017 citus_table_alias (actual rows=4 loops=1) - -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1) - Filter: (a IS NOT NULL) - -> Distributed Subplan XXX_2 - Intermediate Data Size: 150 bytes - Result destination: Write locally - -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1) - Task Count: 1 - Tuple data received from nodes: 50 bytes - Tasks Shown: All - -> Task - Tuple data received from node: 50 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1) - Task Count: 1 - Tuple data received from nodes: 8 bytes - Tasks Shown: All - -> Task - Tuple data received from node: 8 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Aggregate (actual rows=1 loops=1) - -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1) -ROLLBACK; --- https://github.com/citusdata/citus/issues/4074 -prepare ref_select(int) AS select * from ref_table where 1 = $1; -explain :default_analyze_flags execute ref_select(1); -Custom Scan (Citus Adaptive) (actual rows=10 loops=1) - Task Count: 1 - Tuple data received from nodes: 40 bytes - Tasks Shown: All - -> Task - Tuple data received from node: 40 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Result (actual rows=10 loops=1) - One-Time Filter: (1 = $1) - -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1) -deallocate ref_select; -DROP TABLE ref_table, dist_table; --- test EXPLAIN ANALYZE with different replication factors -SET citus.shard_count = 2; -SET citus.shard_replication_factor = 1; -CREATE TABLE dist_table_rep1(a int); -SELECT create_distributed_table('dist_table_rep1', 'a'); - -SET citus.shard_replication_factor = 2; -CREATE TABLE dist_table_rep2(a int); -SELECT create_distributed_table('dist_table_rep2', 'a'); - -EXPLAIN :default_analyze_flags INSERT INTO dist_table_rep1 VALUES(1), (2), (3), (4), (10), (100) RETURNING *; -Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 24 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Insert on dist_table_rep1_570022 citus_table_alias (actual rows=4 loops=1) - -> Values Scan on "*VALUES*" (actual rows=4 loops=1) -EXPLAIN :default_analyze_flags SELECT * from dist_table_rep1; -Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 24 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1) -EXPLAIN :default_analyze_flags INSERT INTO dist_table_rep2 VALUES(1), (2), (3), (4), (10), (100) RETURNING *; -Custom Scan (Citus Adaptive) 
(actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 48 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 32 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Insert on dist_table_rep2_570024 citus_table_alias (actual rows=4 loops=1) - -> Values Scan on "*VALUES*" (actual rows=4 loops=1) -EXPLAIN :default_analyze_flags SELECT * from dist_table_rep2; -Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 24 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_rep2_570024 dist_table_rep2 (actual rows=4 loops=1) -prepare p1 as SELECT * FROM dist_table_rep1; -EXPLAIN :default_analyze_flags EXECUTE p1; -Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 24 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1) -EXPLAIN :default_analyze_flags EXECUTE p1; -Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 24 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1) -EXPLAIN :default_analyze_flags EXECUTE p1; -Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 24 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1) -EXPLAIN :default_analyze_flags EXECUTE p1; -Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 24 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1) -EXPLAIN :default_analyze_flags EXECUTE p1; -Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 24 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1) -EXPLAIN :default_analyze_flags EXECUTE p1; -Custom Scan (Citus Adaptive) (actual rows=6 loops=1) - Task Count: 2 - Tuple data received from nodes: 24 bytes - Tasks Shown: One of 2 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1) -prepare p2 AS SELECT * FROM dist_table_rep1 WHERE a = $1; -EXPLAIN :default_analyze_flags EXECUTE p2(1); -Custom Scan (Citus Adaptive) (actual rows=1 loops=1) - Task Count: 1 - Tuple data received from nodes: 4 bytes - Tasks Shown: All - -> Task - Tuple data received from node: 4 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1) - Filter: (a = 1) - Rows Removed by Filter: 3 -EXPLAIN :default_analyze_flags EXECUTE p2(1); -Custom Scan (Citus Adaptive) (actual rows=1 loops=1) - Task Count: 1 - 
-EXPLAIN :default_analyze_flags EXECUTE p2(1);
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p2(1);
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p2(1);
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p2(1);
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p2(1);
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p2(10);
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 10)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p2(100);
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570023 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 100)
-              Rows Removed by Filter: 1
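The same prepared statement is EXPLAINed six times on purpose: PostgreSQL plans the first five executions with the concrete parameter value and only afterwards considers caching a generic plan, which is why some later outputs in this file print filters such as `(name = $2)` instead of a constant. A small sketch of the mechanism, with an illustrative table name:

    PREPARE p(int) AS SELECT * FROM some_dist_table WHERE a = $1;
    EXECUTE p(1);  -- executions 1 through 5: custom plan, filter printed as (a = 1)
    -- from the sixth execution on, a cached generic plan may be reused,
    -- and EXPLAIN then prints the filter as (a = $1)
    SET plan_cache_mode TO force_generic_plan;  -- PG 12+: force the switch explicitly
    EXECUTE p(1);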
-prepare p3 AS SELECT * FROM dist_table_rep1 WHERE a = 1;
-EXPLAIN :default_analyze_flags EXECUTE p3;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p3;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p3;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p3;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p3;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-EXPLAIN :default_analyze_flags EXECUTE p3;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
-              Filter: (a = 1)
-              Rows Removed by Filter: 3
-DROP TABLE dist_table_rep1, dist_table_rep2;
--- https://github.com/citusdata/citus/issues/2009
-CREATE TABLE simple (id integer, name text);
-SELECT create_distributed_table('simple', 'id');
-
-PREPARE simple_router AS SELECT *, $1 FROM simple WHERE id = 1;
-EXPLAIN :default_explain_flags EXECUTE simple_router(1);
-Custom Scan (Citus Adaptive)
-  Task Count: 1
-  Tasks Shown: All
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on simple_570026 simple
-              Filter: (id = 1)
-EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-              Filter: (id = 1)
-EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-              Filter: (id = 1)
-EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-              Filter: (id = 1)
-EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-              Filter: (id = 1)
-EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-              Filter: (id = 1)
-EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-              Filter: (id = 1)
-EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-              Filter: (id = 1)
-deallocate simple_router;
--- prepared multi-row insert
-PREPARE insert_query AS INSERT INTO simple VALUES ($1, 2), (2, $2);
-EXPLAIN :default_explain_flags EXECUTE insert_query(3, 4);
-Custom Scan (Citus Adaptive)
-  Task Count: 2
-  Tasks Shown: One of 2
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Insert on simple_570026 citus_table_alias
-              ->  Result
-EXPLAIN :default_analyze_flags EXECUTE insert_query(3, 4);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 2
-  Tasks Shown: One of 2
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Insert on simple_570026 citus_table_alias (actual rows=0 loops=1)
-              ->  Result (actual rows=1 loops=1)
-deallocate insert_query;
--- prepared updates
-PREPARE update_query AS UPDATE simple SET name=$1 WHERE name=$2;
-EXPLAIN :default_explain_flags EXECUTE update_query('x', 'y');
-Custom Scan (Citus Adaptive)
-  Task Count: 2
-  Tasks Shown: One of 2
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Update on simple_570026 simple
-              ->  Seq Scan on simple_570026 simple
-                    Filter: (name = 'y'::text)
-EXPLAIN :default_analyze_flags EXECUTE update_query('x', 'y');
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 2
-  Tasks Shown: One of 2
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Update on simple_570026 simple (actual rows=0 loops=1)
-              ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-                    Filter: (name = $2)
-                    Rows Removed by Filter: 1
-deallocate update_query;
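`Tasks Shown: One of 2` reflects Citus's default of printing a single representative task per job; the `citus.explain_all_tasks` setting flips that. A short sketch against the `simple` table used above:

    SET citus.explain_all_tasks TO on;
    EXPLAIN (COSTS off) SELECT * FROM simple;  -- now prints every task, "Tasks Shown: All"
    RESET citus.explain_all_tasks;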
--- prepared deletes
-PREPARE delete_query AS DELETE FROM simple WHERE name=$1 OR name=$2;
-EXPLAIN (COSTS OFF) EXECUTE delete_query('x', 'y');
-Custom Scan (Citus Adaptive)
-  Task Count: 2
-  Tasks Shown: One of 2
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Delete on simple_570026 simple
-              ->  Seq Scan on simple_570026 simple
-                    Filter: ((name = 'x'::text) OR (name = 'y'::text))
-EXPLAIN :default_analyze_flags EXECUTE delete_query('x', 'y');
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 2
-  Tasks Shown: One of 2
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Delete on simple_570026 simple (actual rows=0 loops=1)
-              ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-                    Filter: ((name = $1) OR (name = $2))
-                    Rows Removed by Filter: 1
-deallocate delete_query;
--- prepared distributed insert/select
--- we don't support EXPLAIN for prepared insert/selects of other types.
-PREPARE distributed_insert_select AS INSERT INTO simple SELECT * FROM simple WHERE name IN ($1, $2);
-EXPLAIN :default_explain_flags EXECUTE distributed_insert_select('x', 'y');
-Custom Scan (Citus Adaptive)
-  Task Count: 2
-  Tasks Shown: One of 2
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Insert on simple_570026 citus_table_alias
-              ->  Seq Scan on simple_570026 simple
-                    Filter: ((id IS NOT NULL) AND (name = ANY ('{x,y}'::text[])))
-EXPLAIN :default_analyze_flags EXECUTE distributed_insert_select('x', 'y');
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 2
-  Tasks Shown: One of 2
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Insert on simple_570026 citus_table_alias (actual rows=0 loops=1)
-              ->  Seq Scan on simple_570026 simple (actual rows=0 loops=1)
-                    Filter: ((id IS NOT NULL) AND (name = ANY (ARRAY[$1, $2])))
-                    Rows Removed by Filter: 1
-deallocate distributed_insert_select;
-DROP TABLE simple;
--- prepared cte
-BEGIN;
-PREPARE cte_query AS
-WITH keys AS (
-  SELECT count(*) FROM
-    (SELECT DISTINCT l_orderkey, GREATEST(random(), 2) FROM lineitem_hash_part WHERE l_quantity > $1) t
-),
-series AS (
-  SELECT s FROM generate_series(1, $2) s
-),
-delete_result AS (
-  DELETE FROM lineitem_hash_part WHERE l_quantity < $3 RETURNING *
-)
-SELECT s FROM series;
-EXPLAIN :default_explain_flags EXECUTE cte_query(2, 10, -1);
-Custom Scan (Citus Adaptive)
-  ->  Distributed Subplan XXX_1
-        ->  Custom Scan (Citus Adaptive)
-              Task Count: 4
-              Tasks Shown: One of 4
-              ->  Task
-                    Node: host=localhost port=xxxxx dbname=regression
-                    ->  Delete on lineitem_hash_part_360041 lineitem_hash_part
-                          ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
-                                Filter: (l_quantity < '-1'::numeric)
-  Task Count: 1
-  Tasks Shown: All
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Function Scan on generate_series s
-EXPLAIN :default_analyze_flags EXECUTE cte_query(2, 10, -1);
-Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
-  ->  Distributed Subplan XXX_1
-        Intermediate Data Size: 0 bytes
-        Result destination: Send to 0 nodes
-        ->  Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-              Task Count: 4
-              Tuple data received from nodes: 0 bytes
-              Tasks Shown: One of 4
-              ->  Task
-                    Tuple data received from node: 0 bytes
-                    Node: host=localhost port=xxxxx dbname=regression
-                    ->  Delete on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
-                          ->  Seq Scan on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
-                                Filter: (l_quantity < '-1'::numeric)
-                                Rows Removed by Filter: 2885
-  Task Count: 1
-  Tuple data received from nodes: 40 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 40 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Function Scan on generate_series s (actual rows=10 loops=1)
-ROLLBACK;
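Unlike plain EXPLAIN, EXPLAIN ANALYZE actually executes the statement, so the DELETE inside the CTE above really runs (note the `Rows Removed by Filter` counts in the ANALYZE output only), which is why the test wraps it in BEGIN/ROLLBACK. The same guard pattern in isolation, with an illustrative table name:

    BEGIN;
    EXPLAIN (ANALYZE, COSTS off)
    WITH deleted AS (DELETE FROM some_table WHERE x < 0 RETURNING *)
    SELECT count(*) FROM deleted;  -- the delete has really happened at this point
    ROLLBACK;                      -- undo the side effects of ANALYZE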
--- https://github.com/citusdata/citus/issues/2009#issuecomment-653036502
-CREATE TABLE users_table_2 (user_id int primary key, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint);
-SELECT create_reference_table('users_table_2');
-
-PREPARE p4 (int, int) AS insert into users_table_2 ( value_1, user_id) select value_1, user_id + $2 FROM users_table_2 ON CONFLICT (user_id) DO UPDATE SET value_2 = EXCLUDED.value_1 + $1;
-EXPLAIN :default_explain_flags execute p4(20,20);
-Custom Scan (Citus Adaptive)
-  Task Count: 1
-  Tasks Shown: All
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Insert on users_table_2_570028 citus_table_alias
-              Conflict Resolution: UPDATE
-              Conflict Arbiter Indexes: users_table_2_pkey_570028
-              ->  Seq Scan on users_table_2_570028 users_table_2
-EXPLAIN :default_analyze_flags execute p4(20,20);
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tasks Shown: All
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Insert on users_table_2_570028 citus_table_alias (actual rows=0 loops=1)
-              Conflict Resolution: UPDATE
-              Conflict Arbiter Indexes: users_table_2_pkey_570028
-              Tuples Inserted: 0
-              Conflicting Tuples: 0
-              ->  Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1)
--- simple test to confirm we can fetch long (>4KB) plans
-EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF, BUFFERS OFF) SELECT * FROM users_table_2 WHERE value_1::text = '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X';
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1)
-              Filter: ((value_1)::text = '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X'::text)
-DROP TABLE users_table_2;
--- sorted explain analyze output
-CREATE TABLE explain_analyze_execution_time (a int);
-INSERT INTO explain_analyze_execution_time VALUES (2);
-SELECT create_distributed_table('explain_analyze_execution_time', 'a');
-
--- show that we can sort the output wrt execution time
--- we do the following hack to make the test outputs
--- be consistent. First, ingest a single row then add
--- pg_sleep() call on the query. Postgres will only
--- sleep for the shard that has the single row, so that
--- will definitely be slower
-set citus.explain_analyze_sort_method to "taskId";
-EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 2
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: One of 2
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on explain_analyze_execution_time_570029 explain_analyze_execution_time (actual rows=0 loops=1)
-set citus.explain_analyze_sort_method to "execution-time";
-EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 2
-  Tuple data received from nodes: 4 bytes
-  Tasks Shown: One of 2
-  ->  Task
-        Tuple data received from node: 4 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on explain_analyze_execution_time_570030 explain_analyze_execution_time (actual rows=1 loops=1)
--- reset back
-reset citus.explain_analyze_sort_method;
-DROP TABLE explain_analyze_execution_time;
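The GUC toggled above orders the tasks in EXPLAIN ANALYZE output either by task id (the default) or by per-task execution time, which the test makes observable by forcing the shard that holds the single row to be slow via pg_sleep(); summarized:

    SET citus.explain_analyze_sort_method TO 'taskId';          -- deterministic ordering, the default
    SET citus.explain_analyze_sort_method TO 'execution-time';  -- slowest task is shown first
    RESET citus.explain_analyze_sort_method;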
-CREATE SCHEMA multi_explain;
-SET search_path TO multi_explain;
--- test EXPLAIN ANALYZE when original query returns no columns
-CREATE TABLE reference_table(a int);
-SELECT create_reference_table('reference_table');
-
-INSERT INTO reference_table VALUES (1);
-EXPLAIN :default_analyze_flags SELECT FROM reference_table;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tasks Shown: All
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on reference_table_570031 reference_table (actual rows=1 loops=1)
-CREATE TABLE distributed_table_1(a int, b int);
-SELECT create_distributed_table('distributed_table_1','a');
-
-INSERT INTO distributed_table_1 values (1,1);
-select public.explain_filter('
-EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off) SELECT row_number() OVER() AS r FROM distributed_table_1
-', true);
-WindowAgg (actual rows=1 loops=1)
-  ->  Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-        Task Count: 2
-        Tasks Shown: One of 2
-        ->  Task
-              Node: host=localhost port=xxxxx dbname=regression
-              ->  Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
-CREATE TABLE distributed_table_2(a int, b int);
-SELECT create_distributed_table('distributed_table_2','a');
-
-INSERT INTO distributed_table_2 VALUES (1,1);
-select public.explain_filter('
-EXPLAIN (ANALYZE on, COSTS off, TIMING off, SUMMARY off, BUFFERS off)
-WITH r AS (SELECT row_number() OVER () AS r FROM distributed_table_1)
-SELECT * FROM distributed_table_2
-JOIN r ON (r = distributed_table_2.b)
-LIMIT 3
-', true);
-Limit (actual rows=1 loops=1)
-  ->  Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-        ->  Distributed Subplan XXX_1
-              Intermediate Data Size: 14 bytes
-              Result destination: Send to 2 nodes
-              ->  WindowAgg (actual rows=1 loops=1)
-                    ->  Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-                          Task Count: 2
-                          Tasks Shown: One of 2
-                          ->  Task
-                                Node: host=localhost port=xxxxx dbname=regression
-                                ->  Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
-        Task Count: 2
-        Tuple data received from nodes: 16 bytes
-        Tasks Shown: One of 2
-        ->  Task
-              Tuple data received from node: 16 bytes
-              Node: host=localhost port=xxxxx dbname=regression
-              ->  Limit (actual rows=1 loops=1)
-                    ->  Nested Loop (actual rows=1 loops=1)
-                          Join Filter: (distributed_table_2.b = intermediate_result.r)
-                          ->  Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
-                          ->  Seq Scan on distributed_table_2_570034 distributed_table_2 (actual rows=1 loops=1)
-EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subquery;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 1
-  Tasks Shown: All
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on reference_table_570031 reference_table (actual rows=1 loops=1)
-PREPARE dummy_prep_stmt(int) AS SELECT FROM distributed_table_1;
-EXPLAIN :default_analyze_flags EXECUTE dummy_prep_stmt(50);
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  Task Count: 2
-  Tasks Shown: One of 2
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
-CREATE TYPE multi_explain.int_wrapper_type AS (int_field int);
-CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type);
-SELECT create_distributed_table('tbl', 'a');
-
-EXPLAIN :default_analyze_flags SELECT * FROM tbl;
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 2
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: One of 2
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
-PREPARE q1(int_wrapper_type) AS WITH a AS (SELECT * FROM tbl WHERE b = $1 AND a = 1 OFFSET 0) SELECT * FROM a;
-EXPLAIN (COSTS false) EXECUTE q1('(1)');
-Custom Scan (Citus Adaptive)
-  Task Count: 1
-  Tasks Shown: All
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on tbl_570036 tbl
-              Filter: ((b = '(1)'::multi_explain.int_wrapper_type) AND (a = 1))
-EXPLAIN :default_analyze_flags EXECUTE q1('(1)');
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
-              Filter: ((b = $1) AND (a = 1))
-PREPARE q2(int_wrapper_type) AS WITH a AS (UPDATE tbl SET b = $1 WHERE a = 1 RETURNING *) SELECT * FROM a;
-EXPLAIN (COSTS false) EXECUTE q2('(1)');
-Custom Scan (Citus Adaptive)
-  Task Count: 1
-  Tasks Shown: All
-  ->  Task
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  CTE Scan on a
-              CTE a
-                ->  Update on tbl_570036 tbl
-                      ->  Seq Scan on tbl_570036 tbl
-                            Filter: (a = 1)
-EXPLAIN :default_analyze_flags EXECUTE q2('(1)');
-Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 0 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 0 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  CTE Scan on a (actual rows=0 loops=1)
-              CTE a
-                ->  Update on tbl_570036 tbl (actual rows=0 loops=1)
-                      ->  Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
-                            Filter: (a = 1)
--- EXPLAIN ANALYZE shouldn't execute SubPlans twice (bug #4212)
-SET search_path TO multi_explain;
-CREATE TABLE test_subplans (x int primary key, y int);
-SELECT create_distributed_table('test_subplans','x');
-
-EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
-WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
-SELECT * FROM a;
-Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-  ->  Distributed Subplan XXX_1
-        Intermediate Data Size: 18 bytes
-        Result destination: Write locally
-        ->  Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-              Task Count: 1
-              Tuple data received from nodes: 16 bytes
-              Tasks Shown: All
-              ->  Task
-                    Tuple data received from node: 16 bytes
-                    Node: host=localhost port=xxxxx dbname=regression
-                    ->  Insert on test_subplans_570038 (actual rows=1 loops=1)
-                          ->  Result (actual rows=1 loops=1)
-  Task Count: 1
-  Tuple data received from nodes: 8 bytes
-  Tasks Shown: All
-  ->  Task
-        Tuple data received from node: 8 bytes
-        Node: host=localhost port=xxxxx dbname=regression
-        ->  Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
--- Only one row must exist
-SELECT * FROM test_subplans;
-1|2
--- Will fail with duplicate pk
-EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
-WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
-SELECT * FROM a;
-ERROR: duplicate key value violates unique constraint "test_subplans_pkey_570038"
-DETAIL: Key (x)=(1) already exists.
-CONTEXT: while executing command on localhost:xxxxx
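The two checks above guard against bug #4212, where EXPLAIN ANALYZE executed distributed subplans twice: if the INSERT subplan ran twice, the very first EXPLAIN ANALYZE would already violate the primary key. The assertion boils down to a row count on the table's primary key:

    SELECT count(*) FROM test_subplans;  -- must be exactly 1 after one EXPLAIN ANALYZE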
--- Test JSON format
-TRUNCATE test_subplans;
-EXPLAIN (FORMAT JSON, COSTS off, ANALYZE on, TIMING off, SUMMARY off, BUFFERS OFF)
-WITH a AS (INSERT INTO test_subplans VALUES (1,2) RETURNING *)
-SELECT * FROM a;
-[
-  {
-    "Plan": {
-      "Node Type": "Custom Scan",
-      "Custom Plan Provider": "Citus Adaptive",
-      "Parallel Aware": false,
-      "Async Capable": false,
-      "Actual Rows": 1,
-      "Actual Loops": 1,
-      "Distributed Query": {
-        "Subplans": [
-          {
-            "Intermediate Data Size": "18 bytes",
-            "Result destination": "Write locally",
-            "PlannedStmt": [
-              {
-                "Plan": {
-                  "Node Type": "Custom Scan",
-                  "Custom Plan Provider": "Citus Adaptive",
-                  "Parallel Aware": false,
-                  "Async Capable": false,
-                  "Actual Rows": 1,
-                  "Actual Loops": 1,
-                  "Distributed Query": {
-                    "Job": {
-                      "Task Count": 1,
-                      "Tuple data received from nodes": "16 bytes",
-                      "Tasks Shown": "All",
-                      "Tasks": [
-                        {
-                          "Tuple data received from node": "16 bytes",
-                          "Node": "host=localhost port=xxxxx dbname=regression",
-                          "Remote Plan": [
-                            [
-                              {
-                                "Plan": {
-                                  "Node Type": "ModifyTable",
-                                  "Operation": "Insert",
-                                  "Parallel Aware": false,
-                                  "Async Capable": false,
-                                  "Relation Name": "test_subplans_570038",
-                                  "Alias": "test_subplans_570038",
-                                  "Actual Rows": 1,
-                                  "Actual Loops": 1,
-                                  "Plans": [
-                                    {
-                                      "Node Type": "Result",
-                                      "Parent Relationship": "Outer",
-                                      "Parallel Aware": false,
-                                      "Async Capable": false,
-                                      "Actual Rows": 1,
-                                      "Actual Loops": 1
-                                    }
-                                  ]
-                                },
-                                "Triggers": [
-                                ]
-                              }
-                            ]
-
-                          ]
-                        }
-                      ]
-                    }
-                  }
-                },
-                "Triggers": [
-                ]
-              }
-            ]
-          }
-        ],
-        "Job": {
-          "Task Count": 1,
-          "Tuple data received from nodes": "8 bytes",
-          "Tasks Shown": "All",
-          "Tasks": [
-            {
-              "Tuple data received from node": "8 bytes",
-              "Node": "host=localhost port=xxxxx dbname=regression",
-              "Remote Plan": [
-                [
-                  {
-                    "Plan": {
-                      "Node Type": "Function Scan",
-                      "Parallel Aware": false,
-                      "Async Capable": false,
-                      "Function Name": "read_intermediate_result",
-                      "Alias": "intermediate_result",
-                      "Actual Rows": 1,
-                      "Actual Loops": 1
-                    },
-                    "Triggers": [
-                    ]
-                  }
-                ]
-
-              ]
-            }
-          ]
-        }
-      }
-    },
-    "Triggers": [
-    ]
-  }
-]
--- Only one row must exist
-SELECT * FROM test_subplans;
-1|2
--- check when auto explain + analyze is enabled, we do not allow local execution.
-CREATE SCHEMA test_auto_explain;
-SET search_path TO 'test_auto_explain';
-CREATE TABLE test_ref_table (key int PRIMARY KEY);
-SELECT create_reference_table('test_ref_table');
-
-LOAD 'auto_explain';
-SET auto_explain.log_min_duration = 0;
-set auto_explain.log_analyze to true;
--- the following should not be locally executed since explain analyze is on
-select * from test_ref_table;
-DROP SCHEMA test_auto_explain CASCADE;
-SET client_min_messages TO ERROR;
-DROP SCHEMA multi_explain CASCADE;
diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out
deleted file mode 100644
index 5ff926ff6..000000000
--- a/src/test/regress/expected/multi_metadata_sync_0.out
+++ /dev/null
@@ -1,2264 +0,0 @@
---
--- MULTI_METADATA_SYNC
---
--- this test has different output for PG14 compared to PG15
--- In PG15, public schema is owned by pg_database_owner role
--- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
- server_version_ge_15
----------------------------------------------------------------------
- f
-(1 row)
-
--- Tests for metadata snapshot functions, metadata syncing functions and propagation of
--- metadata changes to MX tables.
--- Turn metadata sync off at first
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
-ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 2;
-SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
-\gset
-ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000;
-SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset
-SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset
--- Create the necessary test utility function
-SET citus.enable_metadata_sync TO OFF;
-CREATE FUNCTION activate_node_snapshot()
-    RETURNS text[]
-    LANGUAGE C STRICT
-    AS 'citus';
-RESET citus.enable_metadata_sync;
-COMMENT ON FUNCTION activate_node_snapshot()
-    IS 'commands to activate node snapshot';
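activate_node_snapshot() is a C test helper that returns the complete list of commands Citus would run to activate a worker node; throughout this file the snapshot is inspected one command per row, sorted for stable output:

    SELECT unnest(activate_node_snapshot()) ORDER BY 1;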
--- Show that none of the existing tables are qualified to be MX tables
-SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s';
- logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
----------------------------------------------------------------------
-(0 rows)
-
--- Since password_encryption default has been changed to sha from md5 with PG14
--- we are updating it manually just for consistent test results between PG versions.
-ALTER SYSTEM SET password_encryption TO md5;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT pg_sleep(0.1);
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
-SET client_min_messages TO ERROR;
-ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword';
-RESET client_min_messages;
--- Show that, with no MX tables, activate node snapshot contains only the delete commands,
--- pg_dist_node entries, pg_dist_object entries and roles.
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
-(33 rows)
-
--- Create a test table with constraints and SERIAL and default from user defined sequence
-CREATE SEQUENCE user_defined_seq;
-CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq'));
-set citus.shard_count to 8;
-set citus.shard_replication_factor to 1;
-SELECT create_distributed_table('mx_test_table', 'col_1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-reset citus.shard_count;
--- Set the replication model of the test table to streaming replication so that it is
--- considered as an MX table
-UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass;
--- add a single shard table and verify the creation commands are included in the activate node snapshot
-CREATE TABLE single_shard_tbl(a int);
-SELECT create_distributed_table('single_shard_tbl', null);
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO single_shard_tbl VALUES (1);
-reset citus.shard_replication_factor;
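Two details worth noting in the setup above: passing NULL as the distribution column creates a single-shard distributed table, and the direct catalog UPDATE forces the streaming replication model ('s') that marks the table as an MX table. Both lines come straight from the deleted test:

    SELECT create_distributed_table('single_shard_tbl', null);  -- single shard, no distribution column
    UPDATE pg_dist_partition SET repmodel = 's'
      WHERE logicalrelid = 'mx_test_table'::regclass;           -- treat as MX table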
--- Show that the created MX table and its sequences are included in the activate node snapshot
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
- ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
- ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
- ALTER TABLE public.mx_test_table OWNER TO postgres
- ALTER TABLE public.single_shard_tbl OWNER TO postgres
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
- CREATE TABLE public.single_shard_tbl (a integer) USING heap
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- DROP TABLE IF EXISTS public.mx_test_table CASCADE
- DROP TABLE IF EXISTS public.single_shard_tbl CASCADE
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT citus_internal.add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's')
- SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency('public.single_shard_tbl');
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SELECT worker_create_truncate_trigger('public.mx_test_table')
- SELECT worker_create_truncate_trigger('public.single_shard_tbl')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(61 rows)
-
--- Drop single shard table
-DROP TABLE single_shard_tbl;
--- Show that CREATE INDEX commands are included in the activate node snapshot
-CREATE INDEX mx_index ON mx_test_table(col_2);
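Each DDL statement on an MX table is expected to reappear in the next activation snapshot; here the index shows up as a worker-ready command:

    CREATE INDEX mx_index ON mx_test_table(col_2);
    -- the next activate_node_snapshot() then contains:
    --   CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)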
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
- ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
- ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
- ALTER TABLE public.mx_test_table OWNER TO postgres
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- DROP TABLE IF EXISTS public.mx_test_table CASCADE
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SELECT worker_create_truncate_trigger('public.mx_test_table')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(52 rows) - --- Show that schema changes are included in the activate node snapshot -CREATE SCHEMA mx_testing_schema; -ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; -SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres - ALTER SEQUENCE public.user_defined_seq OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) - CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 
2, 's') - SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' - UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, 
false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(54 rows) - --- Show that append distributed tables are not included in the activate node snapshot -CREATE TABLE non_mx_test_table (col_1 int, col_2 text); -SELECT create_distributed_table('non_mx_test_table', 'col_1', 'append'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; -SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres - ALTER SEQUENCE public.user_defined_seq OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE 
(col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) - CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' - UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, 
replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, 
shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(54 rows) - --- Show that range distributed tables are not included in the activate node snapshot -UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; -SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres - ALTER SEQUENCE public.user_defined_seq OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) - CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT 
pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' - UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH 
distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(54 rows) - --- Test start_metadata_sync_to_node and citus_activate_node UDFs --- Ensure that hasmetadata=false for all nodes except for the coordinator node -SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; - count ---------------------------------------------------------------------- - 1 -(1 row) - --- Show that metadata can not be synced on secondary node -SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset -SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); - master_add_node ---------------------------------------------------------------------- - 5 -(1 row) - -SELECT start_metadata_sync_to_node('localhost', 8888); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; - hasmetadata ---------------------------------------------------------------------- - f -(1 row) - -SELECT 
stop_metadata_sync_to_node('localhost', 8888);
-NOTICE: (localhost,8888) is a secondary node: to clear the metadata, you should clear metadata from the primary node
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
- hasmetadata
----------------------------------------------------------------------
- f
-(1 row)
-
--- Add a node to another cluster to make sure it's also synced
-SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
- master_add_secondary_node
----------------------------------------------------------------------
- 6
-(1 row)
-
-\c - - - :master_port
--- Run start_metadata_sync_to_node and citus_activate_node and check that it marked hasmetadata for that worker
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;
- nodeid | hasmetadata
----------------------------------------------------------------------
- 2 | t
-(1 row)
-
--- Check that the metadata has been copied to the worker
-\c - - - :worker_1_port
-SELECT * FROM pg_dist_local_group;
- groupid
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
----------------------------------------------------------------------
- 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
- 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
- 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
- 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
- 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
-(5 rows)
-
-SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
- logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
----------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
-(1 row)
-
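The partkey value above is the distribution column stored as a serialized Var node (vartype 23 is the int4 OID, matching col_1). A minimal sketch for reading it back as a column name, assuming the column_to_column_name() helper that Citus' citus_tables view relies on is available in this build:

SELECT logicalrelid,
       column_to_column_name(logicalrelid, partkey) AS distribution_column
FROM pg_dist_partition
WHERE partkey IS NOT NULL;
-- expected for the row above: mx_testing_schema.mx_test_table | col_1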
-SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
----------------------------------------------------------------------
- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737
- mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825
- mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913
- mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1
- mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911
- mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823
- mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735
- mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647
-(8 rows)
-
-SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport | placementid
----------------------------------------------------------------------
- 1310000 | 1 | 0 | localhost | 57637 | 100000
- 1310001 | 1 | 0 | localhost | 57638 | 100001
- 1310002 | 1 | 0 | localhost | 57637 | 100002
- 1310003 | 1 | 0 | localhost | 57638 | 100003
- 1310004 | 1 | 0 | localhost | 57637 | 100004
- 1310005 | 1 | 0 | localhost | 57638 | 100005
- 1310006 | 1 | 0 | localhost | 57637 | 100006
- 1310007 | 1 | 0 | localhost | 57638 | 100007
-(8 rows)
-
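The placement rows above alternate between the two worker groups, one placement per shard at replication factor 1. A quick editorial sanity check, not part of the original test, that the eight placements split evenly across the nodes (the shard id range is taken from the rows above):

SELECT nodename, nodeport, count(*) AS placements
FROM pg_dist_shard_placement
WHERE shardid BETWEEN 1310000 AND 1310007
GROUP BY nodename, nodeport
ORDER BY nodeport;
-- expected from the rows above: localhost | 57637 | 4 and localhost | 57638 | 4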
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col_1 | integer |
- col_2 | text | not null
- col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass)
- col_4 | bigint | default nextval('user_defined_seq'::regclass)
-(4 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_1 | integer | col_1
-(1 row)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_testing_schema.mx_index'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_2 | text | col_2
-(1 row)
-
--- Check that pg_dist_colocation is synced
-SELECT * FROM pg_dist_colocation ORDER BY colocationid;
- colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
----------------------------------------------------------------------
- 2 | 8 | 1 | 23 | 0
-(1 row)
-
--- Make sure that the truncate trigger has been set for the MX table on the worker
-SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
--- Make sure that citus_activate_node considers foreign key constraints
-\c - - - :master_port
--- Since we're superuser, we can set the replication model to 'streaming' to
--- create some MX tables
-SET citus.shard_replication_factor TO 1;
-CREATE SCHEMA mx_testing_schema_2;
-CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE(col1, col3));
-CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text,
- FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3));
-SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- Check that foreign key metadata exists on the worker
-\c - - - :worker_1_port
-SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass;
- Constraint | Definition
----------------------------------------------------------------------
- fk_test_2_col1_col2_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
-(1 row)
-
-\c - - - :master_port
-DROP TABLE mx_testing_schema_2.fk_test_2;
-DROP TABLE mx_testing_schema.fk_test_1;
-RESET citus.shard_replication_factor;
--- Check that repeated calls to citus_activate_node have no side effects
-\c - - - :master_port
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-\c - - - :worker_1_port
-SELECT * FROM pg_dist_local_group;
- groupid
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT * FROM pg_dist_node ORDER BY nodeid;
- nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
----------------------------------------------------------------------
- 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
- 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
- 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
- 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
- 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
-(5 rows)
-
-SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
- logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
----------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
-(1 row)
-
-SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
----------------------------------------------------------------------
- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737
- mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825
- mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913
- mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1
- mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911
- mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823
- mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735
- mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647
-(8 rows)
-
-SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport;
- shardid | shardstate | shardlength | nodename | nodeport | placementid
----------------------------------------------------------------------
- 1310000 | 1 | 0 | localhost | 57637 | 100000
- 1310001 | 1 | 0 | localhost | 57638 | 100001
- 1310002 | 1 | 0 | localhost | 57637 | 100002
- 1310003 | 1 | 0 | localhost | 57638 | 100003
- 1310004 | 1 | 0 | localhost | 57637 | 100004
- 1310005 | 1 | 0 | localhost | 57638 | 100005
- 1310006 | 1 | 0 | localhost | 57637 | 100006
- 1310007 | 1 | 0 | localhost | 57638 | 100007
-(8 rows)
-
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col_1 | integer |
- col_2 | text | not null
- col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass)
- col_4 | bigint | default nextval('user_defined_seq'::regclass)
-(4 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_1 | integer | col_1
-(1 row)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_testing_schema.mx_index'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_2 | text | col_2
-(1 row)
-
-SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
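The identical catalog contents above are the point of this check: activation is written to be re-runnable, since the snapshot shown earlier first resets worker metadata (worker_drop_all_shell_tables, DELETE FROM the pg_dist_* catalogs) and then re-creates it with re-runnable commands (CREATE ... IF NOT EXISTS, DROP ... IF EXISTS). A compact editorial restatement of the invariant, assuming citus_activate_node returns the activated node's nodeid:

SELECT citus_activate_node('localhost', :worker_1_port) AS nodeid \gset
SELECT citus_activate_node('localhost', :worker_1_port) = :nodeid AS idempotent;
-- expected: t (the same nodeid comes back and no catalog rows change)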
--- Make sure that citus_activate_node can be called inside a transaction and rolled back
-\c - - - :master_port
-BEGIN;
-SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-ROLLBACK;
-SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
- hasmetadata
----------------------------------------------------------------------
- f
-(1 row)
-
--- Check that the distributed table can be queried from the worker
-\c - - - :master_port
-SET citus.shard_replication_factor TO 1;
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
---------------------------------------------------------------------- - 1 -(1 row) - -CREATE TABLE mx_query_test (a int, b text, c int); -SELECT create_distributed_table('mx_query_test', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE single_shard_tbl(a int); -SELECT create_distributed_table('single_shard_tbl', null); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO single_shard_tbl VALUES (1); -SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_query_test'::regclass; - repmodel ---------------------------------------------------------------------- - s -(1 row) - -INSERT INTO mx_query_test VALUES (1, 'one', 1); -INSERT INTO mx_query_test VALUES (2, 'two', 4); -INSERT INTO mx_query_test VALUES (3, 'three', 9); -INSERT INTO mx_query_test VALUES (4, 'four', 16); -INSERT INTO mx_query_test VALUES (5, 'five', 24); -\c - - - :worker_1_port -SELECT * FROM mx_query_test ORDER BY a; - a | b | c ---------------------------------------------------------------------- - 1 | one | 1 - 2 | two | 4 - 3 | three | 9 - 4 | four | 16 - 5 | five | 24 -(5 rows) - -INSERT INTO mx_query_test VALUES (6, 'six', 36); -UPDATE mx_query_test SET c = 25 WHERE a = 5; -SELECT * FROM single_shard_tbl ORDER BY a; - a ---------------------------------------------------------------------- - 1 -(1 row) - -INSERT INTO single_shard_tbl VALUES (2); -\c - - - :master_port -SELECT * FROM mx_query_test ORDER BY a; - a | b | c ---------------------------------------------------------------------- - 1 | one | 1 - 2 | two | 4 - 3 | three | 9 - 4 | four | 16 - 5 | five | 25 - 6 | six | 36 -(6 rows) - -SELECT * FROM single_shard_tbl ORDER BY a; - a ---------------------------------------------------------------------- - 1 - 2 -(2 rows) - -\c - - - :master_port -DROP TABLE mx_query_test; -DROP TABLE single_shard_tbl; --- Check that stop_metadata_sync_to_node function sets hasmetadata of the node to false -\c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata ---------------------------------------------------------------------- - t -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata ---------------------------------------------------------------------- - f -(1 row) - --- Test DDL propagation in MX tables -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SET citus.shard_count = 5; -CREATE SCHEMA mx_test_schema_1; -CREATE SCHEMA mx_test_schema_2; --- Create MX tables -SET citus.shard_replication_factor TO 1; -CREATE TABLE mx_test_schema_1.mx_table_1 (col1 int UNIQUE, col2 text); -CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 (col1); -CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text); -CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2); -ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN 
KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1); -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - col1 | integer | - col2 | text | -(2 rows) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_1.mx_table_1_col1_key'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col1 | integer | col1 -(1 row) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_1.mx_index_1'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col1 | integer | col1 -(1 row) - -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - col1 | integer | - col2 | text | -(2 rows) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_2.mx_index_2'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col2 | text | col2 -(1 row) - -SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass; - Constraint | Definition ---------------------------------------------------------------------- - mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1) -(1 row) - -SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table('mx_test_schema_2.mx_table_2', 'col1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- Check that created tables are marked as streaming replicated tables -SELECT - logicalrelid, repmodel -FROM - pg_dist_partition -WHERE - logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass - OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass -ORDER BY - logicalrelid::text; - logicalrelid | repmodel ---------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | s - mx_test_schema_2.mx_table_2 | s -(2 rows) - --- See the shards and placements of the mx tables -SELECT - logicalrelid, shardid, nodename, nodeport -FROM - pg_dist_shard NATURAL JOIN pg_dist_shard_placement -WHERE - logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass - OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass -ORDER BY - logicalrelid::text, shardid; - logicalrelid | shardid | nodename | nodeport ---------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310023 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310024 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310025 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310026 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310027 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310028 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310029 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310030 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310031 | localhost | 57637 -(10 rows) - --- Check that metadata of MX tables exist on the 
metadata worker -\c - - - :worker_1_port --- Check that tables are created -\dt mx_test_schema_?.mx_table_? - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - mx_test_schema_1 | mx_table_1 | table | postgres - mx_test_schema_2 | mx_table_2 | table | postgres -(2 rows) - --- Check that table metadata are created -SELECT - logicalrelid, repmodel -FROM - pg_dist_partition -WHERE - logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass - OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass -ORDER BY - logicalrelid::text; - logicalrelid | repmodel ---------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | s - mx_test_schema_2.mx_table_2 | s -(2 rows) - --- Check that shard and placement data are created -SELECT - logicalrelid, shardid, nodename, nodeport -FROM - pg_dist_shard NATURAL JOIN pg_dist_shard_placement -WHERE - logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass - OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass -ORDER BY - logicalrelid::text, shardid; - logicalrelid | shardid | nodename | nodeport ---------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310023 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310024 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310025 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310026 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310027 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310028 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310029 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310030 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310031 | localhost | 57637 -(10 rows) - --- Check that metadata of MX tables don't exist on the non-metadata worker -\c - - - :worker_2_port -\d mx_test_schema_1.mx_table_1 -\d mx_test_schema_2.mx_table_2 -SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_test_schema%'; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted ---------------------------------------------------------------------- -(0 rows) - -SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_test_schema%'; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- -(0 rows) - -SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------------------------------------------------------------- -(0 rows) - --- Check that CREATE INDEX statement is propagated -\c - - - :master_port -SET client_min_messages TO 'ERROR'; -CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1); -ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1); -\c - - - :worker_1_port -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_2.mx_index_3'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col1 | integer | col1 -(1 row) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_2.mx_table_2_col1_key'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col1 | integer | col1 -(1 row) - --- Check that DROP INDEX 
statement is propagated
-\c - - - :master_port
-DROP INDEX mx_test_schema_2.mx_index_3;
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_test_schema_2.mx_index_3'::regclass;
-ERROR: relation "mx_test_schema_2.mx_index_3" does not exist
--- Check that ALTER TABLE statements are propagated
-\c - - - :master_port
-ALTER TABLE mx_test_schema_1.mx_table_1 ADD COLUMN col3 NUMERIC;
-ALTER TABLE mx_test_schema_1.mx_table_1 ALTER COLUMN col3 SET DATA TYPE INT;
-ALTER TABLE
- mx_test_schema_1.mx_table_1
-ADD CONSTRAINT
- mx_fk_constraint
-FOREIGN KEY
- (col1)
-REFERENCES
- mx_test_schema_2.mx_table_2(col1);
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col1 | integer |
- col2 | text |
- col3 | integer |
-(3 rows)
-
-SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
- Constraint | Definition
----------------------------------------------------------------------
- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1)
-(1 row)
-
--- Check that foreign key constraint with NOT VALID works as well
-\c - - - :master_port
-ALTER TABLE mx_test_schema_1.mx_table_1 DROP CONSTRAINT mx_fk_constraint;
-ALTER TABLE
- mx_test_schema_1.mx_table_1
-ADD CONSTRAINT
- mx_fk_constraint_2
-FOREIGN KEY
- (col1)
-REFERENCES
- mx_test_schema_2.mx_table_2(col1)
-NOT VALID;
-\c - - - :worker_1_port
-SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
- Constraint | Definition
----------------------------------------------------------------------
- mx_fk_constraint_2 | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1)
-(1 row)
-
--- Check that update_distributed_table_colocation call propagates the changes to the workers
-\c - - - :master_port
-SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset
-ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
-SET citus.shard_count TO 7;
-SET citus.shard_replication_factor TO 1;
-CREATE TABLE mx_colocation_test_1 (a int);
-SELECT create_distributed_table('mx_colocation_test_1', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE mx_colocation_test_2 (a int);
-SELECT create_distributed_table('mx_colocation_test_2', 'a', colocate_with:='none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- Reset the colocation IDs of the test tables
-DELETE FROM
- pg_dist_colocation
-WHERE EXISTS (
- SELECT 1
- FROM pg_dist_partition
- WHERE
- colocationid = pg_dist_partition.colocationid
- AND pg_dist_partition.logicalrelid = 'mx_colocation_test_1'::regclass);
--- Check the colocation IDs of the created tables
-SELECT
- logicalrelid, colocationid
-FROM
- pg_dist_partition
-WHERE
- logicalrelid = 'mx_colocation_test_1'::regclass
- OR logicalrelid = 'mx_colocation_test_2'::regclass
-ORDER BY logicalrelid::text;
- logicalrelid | colocationid
----------------------------------------------------------------------
- mx_colocation_test_1 | 10000
- mx_colocation_test_2 | 10001
-(2 rows)
-
--- Update colocation and see the changes on the master and the worker
-SELECT update_distributed_table_colocation('mx_colocation_test_1', colocate_with => 'mx_colocation_test_2');
- update_distributed_table_colocation
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT
- logicalrelid, colocationid
-FROM
- pg_dist_partition
-WHERE
- logicalrelid = 'mx_colocation_test_1'::regclass
- OR logicalrelid = 'mx_colocation_test_2'::regclass
-ORDER BY
- logicalrelid::text;
- logicalrelid | colocationid
----------------------------------------------------------------------
- mx_colocation_test_1 | 10001
- mx_colocation_test_2 | 10001
-(2 rows)
-
-\c - - - :worker_1_port
-SELECT
- logicalrelid, colocationid
-FROM
- pg_dist_partition
-WHERE
- logicalrelid = 'mx_colocation_test_1'::regclass
- OR logicalrelid = 'mx_colocation_test_2'::regclass
-ORDER BY
- logicalrelid::text;
- logicalrelid | colocationid
----------------------------------------------------------------------
- mx_colocation_test_1 | 10001
- mx_colocation_test_2 | 10001
-(2 rows)
-
-\c - - - :master_port
--- Check that DROP TABLE on MX tables works
-DROP TABLE mx_colocation_test_1;
-DROP TABLE mx_colocation_test_2;
-\d mx_colocation_test_1
-\d mx_colocation_test_2
-\c - - - :worker_1_port
-\d mx_colocation_test_1
-\d mx_colocation_test_2
--- Check that dropped MX table can be recreated again
-\c - - - :master_port
-SET citus.shard_count TO 7;
-SET citus.shard_replication_factor TO 1;
-CREATE TABLE mx_temp_drop_test (a int);
-SELECT create_distributed_table('mx_temp_drop_test', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass;
- logicalrelid | repmodel
----------------------------------------------------------------------
- mx_temp_drop_test | s
-(1 row)
-
-DROP TABLE mx_temp_drop_test;
-CREATE TABLE mx_temp_drop_test (a int);
-SELECT create_distributed_table('mx_temp_drop_test', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass;
- logicalrelid | repmodel
----------------------------------------------------------------------
- mx_temp_drop_test | s
-(1 row)
-
-DROP TABLE mx_temp_drop_test;
--- Check that MX tables can be created with SERIAL columns
-\c - - - :master_port
-SET citus.shard_count TO 3;
-SET citus.shard_replication_factor TO 1;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
--- sync table with serial column after create_distributed_table
-CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL);
-SELECT create_distributed_table('mx_table_with_small_sequence', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-DROP TABLE mx_table_with_small_sequence;
--- Show that create_distributed_table works with a serial column
-CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL);
-SELECT create_distributed_table('mx_table_with_small_sequence', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO mx_table_with_small_sequence VALUES (0);
-\c - - - :worker_1_port
--- Insert doesn't work because the defaults are of type int and smallint
-INSERT INTO mx_table_with_small_sequence VALUES (1), (3);
-ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint
-\c - - - :master_port
-SET citus.shard_replication_factor TO 1;
--- Create an MX table with (BIGSERIAL) sequences
-CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL);
-SELECT create_distributed_table('mx_table_with_sequence', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-INSERT INTO mx_table_with_sequence VALUES (0);
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- a | integer |
- b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass)
- c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass)
-(3 rows)
-
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_b_seq | sequence | postgres
-(1 row)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_c_seq | sequence | postgres
-(1 row)
-
--- Check that the sequences created on the metadata worker as well
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- a | integer |
- b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass)
- c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass)
-(3 rows)
-
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_b_seq | sequence | postgres
-(1 row)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_c_seq | sequence | postgres
-(1 row)
-
--- Insert works because the defaults are of type bigint
-INSERT INTO mx_table_with_sequence VALUES (1), (3);
--- check that pg_depend records exist on the worker
-SELECT refobjsubid FROM pg_depend
-WHERE objid = 'mx_table_with_sequence_b_seq'::regclass AND refobjid = 'mx_table_with_sequence'::regclass;
- refobjsubid
----------------------------------------------------------------------
- 2
-(1 row)
-
-SELECT refobjsubid FROM pg_depend
-WHERE objid = 'mx_table_with_sequence_c_seq'::regclass AND refobjid = 'mx_table_with_sequence'::regclass;
- refobjsubid
----------------------------------------------------------------------
- 3
-(1 row)
-
--- Check that the sequences on the worker have their own space
-SELECT nextval('mx_table_with_sequence_b_seq');
- nextval
----------------------------------------------------------------------
- 281474976710659
-(1 row)
-
-SELECT nextval('mx_table_with_sequence_c_seq');
- nextval
----------------------------------------------------------------------
- 281474976710659
-(1 row)
-
--- Check that adding a new metadata node sets the sequence space correctly
-\c - - - :master_port
-SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-\c - - - :worker_2_port
-SELECT groupid FROM pg_dist_local_group;
- groupid
----------------------------------------------------------------------
- 2
-(1 row)
-
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- a | integer |
- b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass)
- c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass)
-(3 rows)
-
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_b_seq | sequence | postgres
-(1 row)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_table_with_sequence_c_seq | sequence | postgres
-(1 row)
-
-SELECT nextval('mx_table_with_sequence_b_seq');
- nextval
----------------------------------------------------------------------
- 562949953421313
-(1 row)
-
-SELECT nextval('mx_table_with_sequence_c_seq');
- nextval
----------------------------------------------------------------------
- 562949953421313
-(1 row)
-
--- Insert doesn't work because the defaults are of type int and smallint
-INSERT INTO mx_table_with_small_sequence VALUES (2), (4);
-ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint
--- Insert works because the defaults are of type bigint
-INSERT INTO mx_table_with_sequence VALUES (2), (4);
--- Check that dropping the mx table with sequences works as expected
-\c - - - :master_port
--- check our small sequence values
-SELECT a, b, c FROM mx_table_with_small_sequence ORDER BY a,b,c;
- a | b | c
----------------------------------------------------------------------
- 0 | 1 | 1
-(1 row)
-
---check our bigint sequence values
-SELECT a, b, c FROM mx_table_with_sequence ORDER BY a,b,c;
- a | b | c
----------------------------------------------------------------------
- 0 | 1 | 1
- 1 | 281474976710657 | 281474976710657
- 2 | 562949953421314 | 562949953421314
- 3 | 281474976710658 | 281474976710658
- 4 | 562949953421315 | 562949953421315
-(5 rows)
-
--- Check that dropping the mx table with sequences works as expected
-DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence;
-\d mx_table_with_sequence
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
--- Check that the sequences are dropped from the workers
-\c - - - :worker_1_port
-\d mx_table_with_sequence
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
--- Check that the sequences are dropped from the workers
-\c - - - :worker_2_port
-\ds mx_table_with_sequence_b_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
-\ds mx_table_with_sequence_c_seq
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
-(0 rows)
-
--- Check that MX sequences play well with non-super users
-\c - - - :master_port
--- Remove a node so that shards and sequences won't be created on table creation. Therefore,
--- we can test that citus_activate_node can actually create the sequence with proper
--- owner
-CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement;
-CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition;
-CREATE TABLE pg_dist_object_temp AS SELECT * FROM pg_catalog.pg_dist_object;
-DELETE FROM pg_dist_placement;
-DELETE FROM pg_dist_partition;
-DELETE FROM pg_catalog.pg_dist_object;
-SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
-SELECT master_remove_node('localhost', :worker_2_port);
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
- -- the master user needs superuser permissions to change the replication model
-CREATE USER mx_user WITH SUPERUSER;
-\c - mx_user - :master_port
--- Create an mx table as a different user
-CREATE TABLE mx_table (a int, b BIGSERIAL);
-SET citus.shard_replication_factor TO 1;
-SELECT create_distributed_table('mx_table', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-\c - postgres - :master_port
-SELECT master_add_node('localhost', :worker_2_port);
- master_add_node
----------------------------------------------------------------------
- 7
-(1 row)
-
-\c - mx_user - :worker_1_port
-SELECT nextval('mx_table_b_seq');
- nextval
----------------------------------------------------------------------
- 281474976710657
-(1 row)
-
-INSERT INTO mx_table (a) VALUES (37);
-INSERT INTO mx_table (a) VALUES (38);
-SELECT * FROM mx_table ORDER BY a;
- a | b
----------------------------------------------------------------------
- 37 | 281474976710658
- 38 | 281474976710659
-(2 rows)
-
-\c - mx_user - :worker_2_port
-SELECT nextval('mx_table_b_seq');
- nextval
----------------------------------------------------------------------
- 1125899906842625
-(1 row)
-
-INSERT INTO mx_table (a) VALUES (39);
-INSERT INTO mx_table (a) VALUES (40);
-SELECT * FROM mx_table ORDER BY a;
- a | b
----------------------------------------------------------------------
- 37 | 281474976710658
- 38 | 281474976710659
- 39 | 1125899906842626
- 40 | 1125899906842627
-(4 rows)
-
-\c - mx_user - :master_port
-DROP TABLE mx_table;
--- put the metadata back into a consistent state
-\c - postgres - :master_port
-INSERT INTO pg_dist_placement SELECT * FROM pg_dist_placement_temp;
-INSERT INTO pg_dist_partition SELECT * FROM pg_dist_partition_temp;
-INSERT INTO pg_catalog.pg_dist_object SELECT * FROM pg_dist_object_temp ON CONFLICT ON CONSTRAINT pg_dist_object_pkey DO NOTHING;
-DROP TABLE pg_dist_placement_temp;
-DROP TABLE pg_dist_partition_temp;
-DROP TABLE pg_dist_object_temp;
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
-\c - - - :worker_1_port
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
-\c - - - :worker_2_port
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
-\c - - - :master_port
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-DROP USER mx_user;
--- Check that create_reference_table creates the metadata on workers
-\c - - - :master_port
-CREATE TABLE mx_ref (col_1 int, col_2 text);
-SELECT create_reference_table('mx_ref');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
--- make sure that adding/removing nodes doesn't cause
--- multiple colocation entries for reference tables
-SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-\dt mx_ref
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_ref | table | postgres
-(1 row)
-
-\c - - - :worker_1_port
-\dt mx_ref
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_ref | table | postgres
-(1 row)
-
-SELECT
- logicalrelid, partmethod, repmodel, shardid, placementid, nodename, nodeport
-FROM
- pg_dist_partition
- NATURAL JOIN pg_dist_shard
- NATURAL JOIN pg_dist_shard_placement
-WHERE
- logicalrelid = 'mx_ref'::regclass
-ORDER BY
- nodeport;
- logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport
----------------------------------------------------------------------
- mx_ref | n | t | 1310074 | 100074 | localhost | 57636
- mx_ref | n | t | 1310074 | 100075 | localhost | 57637
- mx_ref | n | t | 1310074 | 100076 | localhost | 57638
-(3 rows)
-
-SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset
--- make sure we have the pg_dist_colocation record on the worker
-SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
--- Check that DDL commands are propagated to reference tables on workers
-\c - - - :master_port
-ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0;
-CREATE INDEX mx_ref_index ON mx_ref(col_1);
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col_1 | integer |
- col_2 | text |
- col_3 | numeric | default 0
-(3 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_ref_index'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_1 | integer | col_1
-(1 row)
-
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;
- Column | Type | Modifiers
----------------------------------------------------------------------
- col_1 | integer |
- col_2 | text |
- col_3 | numeric | default 0
-(3 rows)
-
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_ref_index'::regclass;
- Column | Type | Definition
----------------------------------------------------------------------
- col_1 | integer | col_1
-(1 row)
-
--- Check that metada is cleaned successfully upon drop table
-\c - - - :master_port
-DROP TABLE mx_ref;
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_ref_index'::regclass;
-ERROR: relation "mx_ref_index" does not exist
-\c - - - :worker_1_port
-SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
- relid = 'mx_ref_index'::regclass;
-ERROR: relation "mx_ref_index" does not exist
-SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid;
- logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
----------------------------------------------------------------------
-(0 rows)
-
-SELECT * FROM pg_dist_shard_placement WHERE shardid=:ref_table_shardid;
- shardid | shardstate | shardlength | nodename | nodeport | placementid
----------------------------------------------------------------------
-(0 rows)
-
--- Check that master_add_node propagates the metadata about new placements of a reference table
-\c - - - :master_port
-SELECT groupid AS old_worker_2_group
- FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
-CREATE TABLE tmp_placement AS
- SELECT * FROM pg_dist_placement WHERE groupid = :old_worker_2_group;
-DELETE FROM pg_dist_placement
- WHERE groupid = :old_worker_2_group;
-SELECT master_remove_node('localhost', :worker_2_port);
-WARNING: could not find any shard placements for shardId 1310001
-WARNING: could not find any shard placements for shardId 1310023
-WARNING: could not find any shard placements for shardId 1310028
- master_remove_node
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE mx_ref (col_1 int, col_2 text);
-SELECT create_reference_table('mx_ref');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT shardid, nodename, nodeport
-FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE logicalrelid='mx_ref'::regclass;
- shardid | nodename | nodeport
----------------------------------------------------------------------
- 1310075 | localhost | 57636
- 1310075 | localhost | 57637
-(2 rows)
-
-\c - - - :worker_1_port
-SELECT shardid, nodename, nodeport
-FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE logicalrelid='mx_ref'::regclass;
- shardid | nodename | nodeport
----------------------------------------------------------------------
- 1310075 | localhost | 57636
- 1310075 | localhost | 57637
-(2 rows)
-
-\c - - - :master_port
-SET client_min_messages TO ERROR;
-SELECT master_add_node('localhost', :worker_2_port);
- master_add_node
----------------------------------------------------------------------
- 8
-(1 row)
-
-RESET client_min_messages;
-SELECT shardid, nodename, nodeport
-FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE logicalrelid='mx_ref'::regclass
-ORDER BY shardid, nodeport;
- shardid | nodename | nodeport
----------------------------------------------------------------------
- 1310075 | localhost | 57636
- 1310075 | localhost | 57637
-(2 rows)
-
-\c - - - :worker_1_port
-SELECT shardid, nodename, nodeport
-FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
-WHERE logicalrelid='mx_ref'::regclass
-ORDER BY shardid, nodeport;
- shardid | nodename | nodeport
----------------------------------------------------------------------
- 1310075 | localhost | 57636
- 1310075 | localhost | 57637
-(2 rows)
-
--- Get the metadata back into a consistent state
-\c - - - :master_port
-INSERT INTO pg_dist_placement (SELECT * FROM tmp_placement);
-DROP TABLE tmp_placement;
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
-\c - - - :worker_1_port
-UPDATE pg_dist_placement
- SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
- WHERE groupid = :old_worker_2_group;
--- Confirm that shouldhaveshards is 'true'
-\c - - - :master_port
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- t
-(1 row)
-
-\c - postgres - :worker_1_port
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- t
-(1 row)
-
--- Check that setting shouldhaveshards to false is correctly transferred to other mx nodes
-\c - - - :master_port
-SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false);
- master_set_node_property
----------------------------------------------------------------------
-
-(1 row)
-
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- f
-(1 row)
-
-\c - postgres - :worker_1_port
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- f
-(1 row)
-
--- Check that setting shouldhaveshards to true is correctly transferred to other mx nodes
-\c - postgres - :master_port
-SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', true);
- master_set_node_property
----------------------------------------------------------------------
-
-(1 row)
-
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- t
-(1 row)
-
-\c - postgres - :worker_1_port
-select shouldhaveshards from pg_dist_node where nodeport = 8888;
- shouldhaveshards
----------------------------------------------------------------------
- t
-(1 row)
-
-\c - - - :master_port
---
--- Check that metadata commands error out if any nodes are out-of-sync
---
--- increase metadata_sync intervals to avoid metadata sync while we test
-ALTER SYSTEM SET citus.metadata_sync_interval TO 300000;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 300000;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
-SET citus.shard_replication_factor TO 1;
-CREATE TABLE dist_table_1(a int);
-SELECT create_distributed_table('dist_table_1', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-UPDATE pg_dist_node SET metadatasynced=false WHERE nodeport=:worker_1_port;
-SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport=:worker_1_port;
- hasmetadata | metadatasynced
----------------------------------------------------------------------
- t | f
-(1 row)
-
-CREATE TABLE dist_table_2(a int);
-SELECT create_distributed_table('dist_table_2', 'a');
-ERROR: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If the node is up, wait until metadata gets synced to it and try again.
-SELECT create_reference_table('dist_table_2');
-ERROR: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If the node is up, wait until metadata gets synced to it and try again.
-ALTER TABLE dist_table_1 ADD COLUMN b int;
-ERROR: localhost:xxxxx is a metadata node, but is out of sync
-HINT: If the node is up, wait until metadata gets synced to it and try again.
-SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
-ERROR: disabling the first worker node in the metadata is not allowed
-DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations.
-HINT: You can force disabling node, SELECT citus_disable_node('localhost', 57637, synchronous:=true);
-CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)"
-PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM
-SELECT citus_disable_node_and_wait('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
-DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table
-HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
-CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)"
-PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM
-SELECT master_remove_node('localhost', :worker_1_port);
-ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
-DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table
-HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
-SELECT master_remove_node('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
-DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table
-HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
--- master_update_node should succeed
-SELECT nodeid AS worker_2_nodeid FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-SELECT master_update_node(:worker_2_nodeid, 'localhost', 4444);
- master_update_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT master_update_node(:worker_2_nodeid, 'localhost', :worker_2_port);
- master_update_node
----------------------------------------------------------------------
-
-(1 row)
-
-ALTER SYSTEM SET citus.metadata_sync_interval TO DEFAULT;
-ALTER SYSTEM SET citus.metadata_sync_retry_interval TO DEFAULT;
-SELECT pg_reload_conf();
- pg_reload_conf
----------------------------------------------------------------------
- t
-(1 row)
-
--- make sure that all the nodes have valid metadata before moving forward
-SELECT wait_until_metadata_sync(60000);
- wait_until_metadata_sync
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT master_add_node('localhost', :worker_2_port);
- master_add_node
----------------------------------------------------------------------
- 8
-(1 row)
-
-CREATE SEQUENCE mx_test_sequence_0;
-CREATE SEQUENCE mx_test_sequence_1;
--- test create_distributed_table
-CREATE TABLE test_table (id int DEFAULT nextval('mx_test_sequence_0'));
-SELECT create_distributed_table('test_table', 'id');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- shouldn't work since it's partition column
-ALTER TABLE test_table ALTER COLUMN id SET DEFAULT nextval('mx_test_sequence_1');
-ERROR: cannot execute ALTER TABLE command involving partition column
--- test different plausible commands
-ALTER TABLE test_table ADD COLUMN id2 int DEFAULT nextval('mx_test_sequence_1');
-ALTER TABLE test_table ALTER COLUMN id2 DROP DEFAULT;
-ALTER TABLE test_table ALTER COLUMN id2 SET DEFAULT nextval('mx_test_sequence_1');
-SELECT unnest(activate_node_snapshot()) order by 1;
- unnest
----------------------------------------------------------------------
- ALTER DATABASE regression OWNER TO postgres;
- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
- ALTER SEQUENCE public.mx_test_sequence_0 OWNER TO postgres
- ALTER SEQUENCE public.mx_test_sequence_1 OWNER TO postgres
- ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
- ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_fk_constraint_2 FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) NOT VALID
- ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_table_1_col1_key UNIQUE (col1)
- ALTER TABLE mx_test_schema_1.mx_table_1 OWNER TO postgres
- ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
- ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1)
- ALTER TABLE mx_test_schema_2.mx_table_2 OWNER TO postgres
- ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
- ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
- ALTER TABLE public.dist_table_1 OWNER TO postgres
- ALTER TABLE public.mx_ref OWNER TO postgres
- ALTER TABLE public.test_table OWNER TO postgres
- CALL pg_catalog.worker_drop_all_shell_tables(true)
- CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
- CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 USING btree (col1)
- CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 USING btree (col2)
- CREATE SCHEMA IF NOT EXISTS mx_test_schema_1 AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS mx_test_schema_2 AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS mx_testing_schema_2 AUTHORIZATION postgres
- CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
- CREATE TABLE mx_test_schema_1.mx_table_1 (col1 integer, col2 text, col3 integer) USING heap
- CREATE TABLE mx_test_schema_2.mx_table_2 (col1 integer, col2 text) USING heap
- CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
- CREATE TABLE public.dist_table_1 (a integer) USING heap
- CREATE TABLE public.mx_ref (col_1 integer, col_2 text) USING heap
- CREATE TABLE public.test_table (id integer DEFAULT worker_nextval('public.mx_test_sequence_0'::regclass), id2 integer DEFAULT worker_nextval('public.mx_test_sequence_1'::regclass)) USING heap
- DELETE FROM pg_catalog.pg_dist_colocation
- DELETE FROM pg_catalog.pg_dist_object
- DELETE FROM pg_catalog.pg_dist_schema
- DELETE FROM pg_dist_node
- DELETE FROM pg_dist_partition
- DELETE FROM pg_dist_placement
- DELETE FROM pg_dist_shard
- DROP TABLE IF EXISTS mx_test_schema_1.mx_table_1 CASCADE
- DROP TABLE IF EXISTS mx_test_schema_2.mx_table_2 CASCADE
- DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
- DROP TABLE IF EXISTS public.dist_table_1 CASCADE
- DROP TABLE IF EXISTS public.mx_ref CASCADE
- DROP TABLE IF EXISTS public.test_table CASCADE
- GRANT CREATE ON SCHEMA public TO PUBLIC;
- GRANT CREATE ON SCHEMA public TO postgres;
- GRANT USAGE ON SCHEMA public TO PUBLIC;
- GRANT USAGE ON SCHEMA public TO postgres;
- INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (5, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(6, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(8, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE)
- RESET ROLE
- RESET ROLE
- SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal.add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's')
- SELECT citus_internal.add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's')
- SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT citus_internal.add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's')
- SELECT citus_internal.add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't')
- SELECT citus_internal.add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's')
- SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_1.mx_table_1');
- SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_2.mx_table_2');
- SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency('public.dist_table_1');
- SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_ref');
- SELECT pg_catalog.worker_drop_sequence_dependency('public.test_table');
- SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
- SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_0 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_1 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer')
- SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
- SELECT worker_create_truncate_trigger('mx_test_schema_1.mx_table_1')
- SELECT worker_create_truncate_trigger('mx_test_schema_2.mx_table_2')
- SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
- SELECT worker_create_truncate_trigger('public.dist_table_1')
- SELECT worker_create_truncate_trigger('public.mx_ref')
- SELECT worker_create_truncate_trigger('public.test_table')
- SET ROLE postgres
- SET ROLE postgres
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'off'
- SET citus.enable_ddl_propagation TO 'on'
- SET citus.enable_ddl_propagation TO 'on'
- UPDATE pg_dist_local_group SET groupid = 1
- UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
- UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(118 rows)
-
--- shouldn't work since test_table is MX
-ALTER TABLE test_table ADD COLUMN id3 bigserial;
-ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers
--- shouldn't work since the above operations should be the only subcommands
-ALTER TABLE test_table ADD COLUMN id4 int DEFAULT nextval('mx_test_sequence_1') CHECK (id4 > 0);
-ERROR: cannot execute ADD COLUMN .. DEFAULT nextval('..') command with other subcommands/constraints
-HINT: You can issue each subcommand separately
-ALTER TABLE test_table ADD COLUMN id4 int, ADD COLUMN id5 int DEFAULT nextval('mx_test_sequence_1');
-ERROR: cannot execute ADD COLUMN .. DEFAULT nextval('..') command with other subcommands/constraints
-HINT: You can issue each subcommand separately
-ALTER TABLE test_table ALTER COLUMN id1 SET DEFAULT nextval('mx_test_sequence_1'), ALTER COLUMN id2 DROP DEFAULT;
-ERROR: cannot execute ALTER COLUMN COLUMN .. SET DEFAULT nextval('..') command with other subcommands
-HINT: You can issue each subcommand separately
-ALTER TABLE test_table ADD COLUMN id4 bigserial CHECK (id4 > 0);
-ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers
-\c - - - :worker_1_port
-\ds
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_test_sequence_0 | sequence | postgres
- public | mx_test_sequence_1 | sequence | postgres
- public | mx_test_table_col_3_seq | sequence | postgres
- public | sequence_rollback | sequence | postgres
- public | sequence_rollback(citus_backup_0) | sequence | postgres
- public | user_defined_seq | sequence | postgres
-(6 rows)
-
-\c - - - :master_port
-CREATE SEQUENCE local_sequence;
--- verify that DROP SEQUENCE will propagate the command to workers for
--- the distributed sequences mx_test_sequence_0 and mx_test_sequence_1
-DROP SEQUENCE mx_test_sequence_0, mx_test_sequence_1, local_sequence CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to default value for column id2 of table test_table
-drop cascades to default value for column id of table test_table
-\c - - - :worker_1_port
-\ds
- List of relations
- Schema | Name | Type | Owner
----------------------------------------------------------------------
- public | mx_test_table_col_3_seq | sequence | postgres
- public | sequence_rollback | sequence | postgres
- public | sequence_rollback(citus_backup_0) | sequence | postgres
- public | user_defined_seq | sequence | postgres
-(4 rows)
-
-\c - - - :master_port
-DROP TABLE test_table CASCADE;
--- Cleanup
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-DROP TABLE mx_test_schema_2.mx_table_2 CASCADE;
-NOTICE: drop cascades to constraint mx_fk_constraint_2 on table mx_test_schema_1.mx_table_1
-DROP TABLE mx_test_schema_1.mx_table_1 CASCADE;
-DROP TABLE mx_testing_schema.mx_test_table;
-DROP TABLE mx_ref;
-DROP TABLE dist_table_1, dist_table_2;
-SET client_min_messages TO ERROR;
-SET citus.enable_ddl_propagation TO off; -- for enterprise
-CREATE USER non_super_metadata_user;
-SET citus.enable_ddl_propagation TO on;
-RESET client_min_messages;
-SELECT run_command_on_workers('CREATE USER non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,"CREATE ROLE")
- (localhost,57638,t,"CREATE ROLE")
-(2 rows)
-
-GRANT EXECUTE ON FUNCTION start_metadata_sync_to_node(text,int) TO non_super_metadata_user;
-GRANT EXECUTE ON FUNCTION stop_metadata_sync_to_node(text,int,bool) TO non_super_metadata_user;
-GRANT ALL ON pg_dist_node TO non_super_metadata_user;
-GRANT ALL ON pg_dist_local_group TO non_super_metadata_user;
-GRANT ALL ON SCHEMA citus TO non_super_metadata_user;
-GRANT INSERT ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user;
-GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user;
-GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user;
-GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user;
-GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user;
-SELECT run_command_on_workers('GRANT ALL ON pg_dist_node TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT ALL ON pg_dist_local_group TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT ALL ON SCHEMA citus TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('ALTER SEQUENCE user_defined_seq OWNER TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,"ALTER SEQUENCE")
- (localhost,57638,t,"ALTER SEQUENCE")
-(2 rows)
-
-SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user');
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
-SET ROLE non_super_metadata_user;
--- user must be super user stop/start metadata
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-ERROR: operation is not allowed
-HINT: Run the command with a superuser.
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-ERROR: operation is not allowed
-HINT: Run the command with a superuser.
-RESET ROLE;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
- stop_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
- start_metadata_sync_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-RESET citus.shard_count;
-RESET citus.shard_replication_factor;
-ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id;
-ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id;
-ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
-ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
--- Activate them at the end
-SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out
index f5882e5e7..bf09ae02c 100644
--- a/src/test/regress/expected/multi_mx_create_table.out
+++ b/src/test/regress/expected/multi_mx_create_table.out
@@ -58,8 +58,6 @@ CREATE OPERATOR citus_mx_test_schema.=== (
 );
 SET search_path TO public;
 SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
-\gset
 SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
 \gset
 \if :server_version_ge_17
@@ -67,12 +65,10 @@ SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
 -- Relevant PG commit:
 -- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d
 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
-\elif :server_version_ge_16
+\else
 -- In PG16, read-only server settings lc_collate and lc_ctype are removed
 -- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982
 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
-\else
-SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
 \endif
 CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale);
 CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out
index 1a885f120..3ba366f62 100644
--- a/src/test/regress/expected/multi_mx_hide_shard_names.out
+++ b/src/test/regress/expected/multi_mx_hide_shard_names.out
@@ -1,6 +1,12 @@
 --
 -- Hide shard names on MX worker nodes
 --
+-- PostgreSQL 18 planner changes (probably AIO and updated cost model) make
+-- sequential scans cheaper, so the psql `\d table`-style query that uses a
+-- regex on `pg_class.relname` no longer chooses an index scan. This causes
+-- a plan difference.
+-- Alternative test output can be removed when we drop PG17 support
+--
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1130000;
 -- make sure that the signature of the citus_table_is_visible
 -- and pg_table_is_visible are the same since the logic
@@ -471,10 +477,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 test_table_2_1130000
 (4 rows)
 
--- PG16 added one more backend type B_STANDALONE_BACKEND
--- and also alphabetized the backend types, hence the orders changed
--- Relevant PG16 commit:
--- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
 -- Relevant Pg17 commit:
 -- https://github.com/postgres/postgres/commit/067701f57758f9baed5bd9d868539738d77bfa92
 -- Relevant PG18 commit:
@@ -482,7 +484,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 SHOW server_version \gset
 SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset
 SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset
-SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset
 \if :server_version_ge_18
 SELECT 1 AS client_backend \gset
 SELECT 5 AS bgworker \gset
@@ -491,14 +492,10 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \g
 SELECT 1 AS client_backend \gset
 SELECT 4 AS bgworker \gset
 SELECT 5 AS walsender \gset
-\elif :server_version_ge_16
+\else
 SELECT 4 AS client_backend \gset
 SELECT 5 AS bgworker \gset
 SELECT 12 AS walsender \gset
-\else
- SELECT 3 AS client_backend \gset
- SELECT 4 AS bgworker \gset
- SELECT 9 AS walsender \gset
 \endif
 -- say, we set it to bgworker
 -- the shards and indexes do not show up
diff --git a/src/test/regress/expected/multi_mx_hide_shard_names_0.out b/src/test/regress/expected/multi_mx_hide_shard_names_0.out
index 21ae97d5a..03960e4f0 100644
--- a/src/test/regress/expected/multi_mx_hide_shard_names_0.out
+++ b/src/test/regress/expected/multi_mx_hide_shard_names_0.out
@@ -1,6 +1,12 @@
 --
 -- Hide shard names on MX worker nodes
 --
+-- PostgreSQL 18 planner changes (probably AIO and updated cost model) make
+-- sequential scans cheaper, so the psql `\d table`-style query that uses a
+-- regex on `pg_class.relname` no longer chooses an index scan. This causes
+-- a plan difference.
+-- Alternative test output can be removed when we drop PG17 support
+--
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1130000;
 -- make sure that the signature of the citus_table_is_visible
 -- and pg_table_is_visible are the same since the logic
@@ -472,10 +478,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 test_table_2_1130000
 (4 rows)
 
--- PG16 added one more backend type B_STANDALONE_BACKEND
--- and also alphabetized the backend types, hence the orders changed
--- Relevant PG16 commit:
--- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
 -- Relevant Pg17 commit:
 -- https://github.com/postgres/postgres/commit/067701f57758f9baed5bd9d868539738d77bfa92
 -- Relevant PG18 commit:
@@ -483,7 +485,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 SHOW server_version \gset
 SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset
 SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset
-SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset
 \if :server_version_ge_18
 SELECT 1 AS client_backend \gset
 SELECT 5 AS bgworker \gset
@@ -492,14 +493,10 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \g
 SELECT 1 AS client_backend \gset
 SELECT 4 AS bgworker \gset
 SELECT 5 AS walsender \gset
-\elif :server_version_ge_16
+\else
 SELECT 4 AS client_backend \gset
 SELECT 5 AS bgworker \gset
 SELECT 12 AS walsender \gset
-\else
- SELECT 3 AS client_backend \gset
- SELECT 4 AS bgworker \gset
- SELECT 9 AS walsender \gset
 \endif
 -- say, we set it to bgworker
 -- the shards and indexes do not show up
diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out
deleted file mode 100644
index 62271f9a7..000000000
--- a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out
+++ /dev/null
@@ -1,167 +0,0 @@
---
--- MULTI_MX_INSERT_SELECT_REPARTITION
---
--- Test behaviour of repartitioned INSERT ... SELECT in MX setup
---
--- This test file has an alternative output because of the change in the
--- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
--- The alternative output can be deleted when we drop support for PG14 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15; - server_version_ge_15 ---------------------------------------------------------------------- - f -(1 row) - -CREATE SCHEMA multi_mx_insert_select_repartition; -SET search_path TO multi_mx_insert_select_repartition; -SET citus.next_shard_id TO 4213581; -SET citus.shard_replication_factor TO 1; -SET citus.shard_count TO 4; -CREATE TABLE source_table(a int, b int); -SELECT create_distributed_table('source_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO source_table SELECT floor(i/4), i*i FROM generate_series(1, 20) i; -SET citus.shard_count TO 3; -CREATE TABLE target_table(a int, b int); -SELECT create_distributed_table('target_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE FUNCTION square(int) RETURNS INT - AS $$ SELECT $1 * $1 $$ - LANGUAGE SQL; -select create_distributed_function('square(int)'); -NOTICE: procedure multi_mx_insert_select_repartition.square is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - -select public.colocate_proc_with_table('square', 'source_table'::regclass, 0); - colocate_proc_with_table ---------------------------------------------------------------------- - -(1 row) - --- Test along with function delegation --- function delegation only happens for "SELECT f()", and we don't use --- repartitioned INSERT/SELECT when task count is 1, so the following --- should go via coordinator -EXPLAIN (costs off) INSERT INTO target_table(a) SELECT square(4); - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (Citus INSERT ... SELECT) - INSERT/SELECT method: pull to coordinator - -> Result -(3 rows) - -INSERT INTO target_table(a) SELECT square(4); -SELECT * FROM target_table; - a | b ---------------------------------------------------------------------- - 16 | -(1 row) - -TRUNCATE target_table; --- --- Test repartitioned INSERT/SELECT from MX worker --- -\c - - - :worker_1_port -SET search_path TO multi_mx_insert_select_repartition; -EXPLAIN (costs off) INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (Citus INSERT ... SELECT) - INSERT/SELECT method: repartition - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: a - -> Seq Scan on source_table_4213581 source_table -(10 rows) - -INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a; -SET citus.log_local_commands to on; --- INSERT .. SELECT via repartitioning with local execution -BEGIN; - select count(*) from source_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 4 -(1 row) - - -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky - SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; - insert into target_table SELECT a*2 FROM source_table RETURNING a; -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a -NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a - a ---------------------------------------------------------------------- - 0 - 0 - 0 - 2 - 2 - 2 - 2 - 4 - 4 - 4 - 4 - 6 - 6 - 6 - 6 - 8 - 8 - 8 - 8 - 10 -(20 rows) - -ROLLBACK; -BEGIN; - select count(*) from source_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 4 -(1 row) - - insert into target_table SELECT a FROM source_table LIMIT 10; -NOTICE: executing the command locally: SELECT a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true LIMIT '10'::bigint -NOTICE: executing the copy locally for shard xxxxx -ROLLBACK; -\c - - - :master_port -SET search_path TO multi_mx_insert_select_repartition; -SELECT * FROM target_table ORDER BY a; - a | b ---------------------------------------------------------------------- - 0 | 9 - 1 | 49 - 2 | 121 - 3 | 225 - 4 | 361 - 5 | 400 -(6 rows) - -RESET client_min_messages; -\set VERBOSITY terse -DROP SCHEMA multi_mx_insert_select_repartition CASCADE; -NOTICE: drop cascades to 3 other objects diff --git a/src/test/regress/expected/multi_outer_join_columns.out b/src/test/regress/expected/multi_outer_join_columns.out index 3d32a8ef5..095f93280 100644 --- a/src/test/regress/expected/multi_outer_join_columns.out +++ b/src/test/regress/expected/multi_outer_join_columns.out @@ -2,16 +2,6 @@ --- do 
not cause issues for the postgres planner, in particular postgres versions 16+, where the --- varnullingrels field of a VAR node may contain relids of join relations that can make the var --- NULL; in a rewritten distributed query without a join such relids do not have a meaning. --- This test has an alternative goldfile because of the following feature in Postgres 16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; - server_version_ge_16 ---------------------------------------------------------------------- - t -(1 row) - CREATE SCHEMA outer_join_columns_testing; SET search_path to 'outer_join_columns_testing'; SET citus.next_shard_id TO 30070000; diff --git a/src/test/regress/expected/multi_outer_join_columns_1.out b/src/test/regress/expected/multi_outer_join_columns_1.out deleted file mode 100644 index 4df79cc92..000000000 --- a/src/test/regress/expected/multi_outer_join_columns_1.out +++ /dev/null @@ -1,422 +0,0 @@ ---- Test for verifying that column references (var nodes) in targets that cannot be pushed down ---- do not cause issues for the postgres planner, in particular postgres versions 16+, where the ---- varnullingrels field of a VAR node may contain relids of join relations that can make the var ---- NULL; in a rewritten distributed query without a join such relids do not have a meaning. --- This test has an alternative goldfile because of the following feature in Postgres 16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; - server_version_ge_16 ---------------------------------------------------------------------- - f -(1 row) - -CREATE SCHEMA outer_join_columns_testing; -SET search_path to 'outer_join_columns_testing'; -SET citus.next_shard_id TO 30070000; -SET citus.shard_replication_factor TO 1; -SET citus.enable_local_execution TO ON; -CREATE TABLE t1 (id INT PRIMARY KEY); -INSERT INTO t1 VALUES (1), (2); -CREATE TABLE t2 (id INT, account_id INT, a2 INT, PRIMARY KEY(id, account_id)); -INSERT INTO t2 VALUES (3, 1, 10), (4, 2, 20), (5, 1, NULL); -SELECT create_distributed_table('t1', 'id'); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. -HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$outer_join_columns_testing.t1$$) - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table('t2', 'account_id'); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. 
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$outer_join_columns_testing.t2$$) - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- Test the issue seen in #7705; a target expression with --- a window function that cannot be pushed down because the --- partion by is not on the distribution column also includes --- a column from the inner side of a left outer join, which --- produces a non-empty varnullingrels set in PG 16 (and higher) -SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; - id | max ---------------------------------------------------------------------- - 1 | 10 - 2 | 20 - 1 | -(3 rows) - -select public.explain_filter(' -EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) -SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -', true); - explain_filter ---------------------------------------------------------------------- - WindowAgg - Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3 - -> Sort - Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max - Sort Key: remote_scan.worker_column_3 - -> Custom Scan (Citus Adaptive) - Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery - Node: host=localhost port=xxxxx dbname=regression - -> Hash Right Join - Output: t1.id, t2.a2, t2.id - Inner Unique: true - Hash Cond: (t2.account_id = t1.id) - -> Seq Scan on outer_join_columns_testing.t2_30070004 t2 - Output: t2.id, t2.account_id, t2.a2 - -> Hash - Output: t1.id - -> Seq Scan on outer_join_columns_testing.t1_30070000 t1 - Output: t1.id -(22 rows) - -SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) -FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id; - id | max ---------------------------------------------------------------------- - 1 | 10 - 2 | 20 - 1 | -(3 rows) - -select public.explain_filter(' -EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) -SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) -FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id -', true); - explain_filter ---------------------------------------------------------------------- - WindowAgg - Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3 - -> Sort - Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max - Sort Key: remote_scan.worker_column_3 - -> Custom Scan (Citus Adaptive) - Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t2_30070004 t2 RIGHT JOIN outer_join_columns_testing.t1_30070000 t1 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery - Node: host=localhost port=xxxxx dbname=regression - -> Hash Right Join - Output: t1.id, t2.a2, t2.id - Inner Unique: true - Hash Cond: (t2.account_id = t1.id) - -> Seq Scan on 
outer_join_columns_testing.t2_30070004 t2 - Output: t2.id, t2.account_id, t2.a2 - -> Hash - Output: t1.id - -> Seq Scan on outer_join_columns_testing.t1_30070000 t1 - Output: t1.id -(22 rows) - -SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; - id | max ---------------------------------------------------------------------- - 1 | - 1 | 10 - 2 | 20 -(3 rows) - -select public.explain_filter(' -EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) -SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -', true); - explain_filter ---------------------------------------------------------------------- - HashAggregate - Output: remote_scan.id, (max(remote_scan.max) OVER (?)), remote_scan.worker_column_3 - Group Key: remote_scan.id, max(remote_scan.max) OVER (?) - -> WindowAgg - Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3 - -> Sort - Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max - Sort Key: remote_scan.worker_column_3 - -> Custom Scan (Citus Adaptive) - Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery - Node: host=localhost port=xxxxx dbname=regression - -> Hash Right Join - Output: t1.id, t2.a2, t2.id - Inner Unique: true - Hash Cond: (t2.account_id = t1.id) - -> Seq Scan on outer_join_columns_testing.t2_30070004 t2 - Output: t2.id, t2.account_id, t2.a2 - -> Hash - Output: t1.id - -> Seq Scan on outer_join_columns_testing.t1_30070000 t1 - Output: t1.id -(25 rows) - -CREATE SEQUENCE test_seq START 101; -CREATE OR REPLACE FUNCTION TEST_F(int) returns INT language sql stable as $$ select $1 + 42; $$ ; --- Issue #7705 also occurs if a target expression includes a column --- of a distributed table that is on the inner side of a left outer --- join and a call to nextval(), because nextval() cannot be pushed --- down, and must be run on the coordinator -SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -ORDER BY t1.id; - id | test_f ---------------------------------------------------------------------- - 1 | 153 - 1 | - 2 | 165 -(3 rows) - -EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) -SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -ORDER BY t1.id; - QUERY PLAN ---------------------------------------------------------------------- - Result - Output: remote_scan.id, ((remote_scan.test_f + (nextval('test_seq'::regclass))::integer) + 42) - -> Sort - Output: remote_scan.id, remote_scan.test_f - Sort Key: remote_scan.id - -> Custom Scan (Citus Adaptive) - Output: remote_scan.id, remote_scan.test_f - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT worker_column_1 AS id, worker_column_2 AS test_f FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery - Node: host=localhost port=xxxxx dbname=regression - -> Hash Right Join - Output: 
t1.id, t2.a2 - Inner Unique: true - Hash Cond: (t2.account_id = t1.id) - -> Seq Scan on outer_join_columns_testing.t2_30070004 t2 - Output: t2.id, t2.account_id, t2.a2 - -> Hash - Output: t1.id - -> Seq Scan on outer_join_columns_testing.t1_30070000 t1 - Output: t1.id -(22 rows) - -SELECT t1.id, CASE nextval('test_seq') % 2 = 0 WHEN true THEN t2.a2 ELSE 1 END -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -ORDER BY t1.id; - id | case ---------------------------------------------------------------------- - 1 | 10 - 1 | 1 - 2 | 20 -(3 rows) - -EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) -SELECT t1.id, CASE nextval('test_seq') %2 = 0 WHEN true THEN t2.a2 ELSE 1 END -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -ORDER BY t1.id; - QUERY PLAN ---------------------------------------------------------------------- - Result - Output: remote_scan.id, CASE ((nextval('test_seq'::regclass) % '2'::bigint) = 0) WHEN CASE_TEST_EXPR THEN remote_scan."case" ELSE 1 END - -> Sort - Output: remote_scan.id, remote_scan."case" - Sort Key: remote_scan.id - -> Custom Scan (Citus Adaptive) - Output: remote_scan.id, remote_scan."case" - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT worker_column_1 AS id, worker_column_2 AS "case" FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery - Node: host=localhost port=xxxxx dbname=regression - -> Hash Right Join - Output: t1.id, t2.a2 - Inner Unique: true - Hash Cond: (t2.account_id = t1.id) - -> Seq Scan on outer_join_columns_testing.t2_30070004 t2 - Output: t2.id, t2.account_id, t2.a2 - -> Hash - Output: t1.id - -> Seq Scan on outer_join_columns_testing.t1_30070000 t1 - Output: t1.id -(22 rows) - --- Issue #7787: count distinct of a column from the inner side of a --- left outer join will have a non-empty varnullingrels in the query --- tree returned by Postgres 16+, so ensure this is not reflected in --- the worker subquery constructed by Citus; it has just one relation, --- for the pushed down subquery. -SELECT COUNT(DISTINCT a2) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; - count ---------------------------------------------------------------------- - 2 -(1 row) - -EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) -SELECT COUNT(DISTINCT a2) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; - QUERY PLAN ---------------------------------------------------------------------- - Aggregate - Output: count(DISTINCT remote_scan.count) - -> Custom Scan (Citus Adaptive) - Output: remote_scan.count - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT worker_column_1 AS count FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1 - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: t2.a2 - Group Key: t2.a2 - -> Hash Right Join - Output: t2.a2 - Inner Unique: true - Hash Cond: (t2.account_id = t1.id) - -> Seq Scan on outer_join_columns_testing.t2_30070004 t2 - Output: t2.id, t2.account_id, t2.a2 - -> Hash - Output: t1.id - -> Seq Scan on outer_join_columns_testing.t1_30070000 t1 - Output: t1.id -(22 rows) - --- Issue #7787 also occurs with a HAVING clause -SELECT 1 -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -HAVING COUNT(DISTINCT a2) > 1; - ?column? 
---------------------------------------------------------------------- - 1 -(1 row) - -select public.explain_filter(' -EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) -SELECT 1 -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -HAVING COUNT(DISTINCT a2) > 1; -', true); - explain_filter ---------------------------------------------------------------------- - Aggregate - Output: remote_scan."?column?" - Filter: (count(DISTINCT remote_scan.worker_column_2) > 1) - -> Custom Scan (Citus Adaptive) - Output: remote_scan."?column?", remote_scan.worker_column_2 - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT 1, worker_column_1 AS worker_column_2 FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1 - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: 1, t2.a2 - Group Key: t2.a2 - -> Hash Right Join - Output: t2.a2 - Inner Unique: true - Hash Cond: (t2.account_id = t1.id) - -> Seq Scan on outer_join_columns_testing.t2_30070004 t2 - Output: t2.id, t2.account_id, t2.a2 - -> Hash - Output: t1.id - -> Seq Scan on outer_join_columns_testing.t1_30070000 t1 - Output: t1.id -(23 rows) - --- Check right outer join -SELECT COUNT(DISTINCT a2) -FROM t2 RIGHT OUTER JOIN t1 ON t2.account_id = t1.id; - count ---------------------------------------------------------------------- - 2 -(1 row) - -EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) -SELECT COUNT(DISTINCT a2) -FROM t2 RIGHT OUTER JOIN t1 ON t2.account_id = t1.id; - QUERY PLAN ---------------------------------------------------------------------- - Aggregate - Output: count(DISTINCT remote_scan.count) - -> Custom Scan (Citus Adaptive) - Output: remote_scan.count - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT worker_column_1 AS count FROM (SELECT t2.a2 AS worker_column_1 FROM (outer_join_columns_testing.t2_30070004 t2 RIGHT JOIN outer_join_columns_testing.t1_30070000 t1 ON ((t2.account_id OPERATOR(pg_catalog.=) t1.id)))) worker_subquery GROUP BY worker_column_1 - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: t2.a2 - Group Key: t2.a2 - -> Hash Right Join - Output: t2.a2 - Inner Unique: true - Hash Cond: (t2.account_id = t1.id) - -> Seq Scan on outer_join_columns_testing.t2_30070004 t2 - Output: t2.id, t2.account_id, t2.a2 - -> Hash - Output: t1.id - -> Seq Scan on outer_join_columns_testing.t1_30070000 t1 - Output: t1.id -(22 rows) - --- Check both count distinct and having clause -SELECT COUNT(DISTINCT a2) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -HAVING COUNT(DISTINCT t2.id) > 1; - count ---------------------------------------------------------------------- - 2 -(1 row) - -EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) -SELECT COUNT(DISTINCT a2) -FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id -HAVING COUNT(DISTINCT t2.id) > 1; - QUERY PLAN ---------------------------------------------------------------------- - Aggregate - Output: count(DISTINCT remote_scan.count) - Filter: (count(DISTINCT remote_scan.worker_column_2) > 1) - -> Custom Scan (Citus Adaptive) - Output: remote_scan.count, remote_scan.worker_column_2 - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Query: SELECT worker_column_1 AS count, worker_column_2 FROM (SELECT t2.a2 AS worker_column_1, t2.id AS worker_column_2 FROM (outer_join_columns_testing.t1_30070000 t1 LEFT JOIN outer_join_columns_testing.t2_30070004 t2 ON 
((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery GROUP BY worker_column_1, worker_column_2 - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Output: t2.a2, t2.id - Group Key: t2.a2, t2.id - -> Hash Right Join - Output: t2.a2, t2.id - Inner Unique: true - Hash Cond: (t2.account_id = t1.id) - -> Seq Scan on outer_join_columns_testing.t2_30070004 t2 - Output: t2.id, t2.account_id, t2.a2 - -> Hash - Output: t1.id - -> Seq Scan on outer_join_columns_testing.t1_30070000 t1 - Output: t1.id -(23 rows) - ---- cleanup -\set VERBOSITY TERSE -DROP SCHEMA outer_join_columns_testing CASCADE; -NOTICE: drop cascades to 4 other objects -RESET all; diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index e6b5ac9a9..a096d82e8 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -348,8 +348,6 @@ SELECT * FROM nation_hash ORDER BY 1,2,3,4; --test COLLATION with schema SET search_path TO public; SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset \if :server_version_ge_17 @@ -357,12 +355,10 @@ SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 -- Relevant PG commit: -- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset -\elif :server_version_ge_16 +\else -- In PG16, read-only server settings lc_collate and lc_ctype are removed -- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset -\else -SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset \endif CREATE COLLATION test_schema_support.english (LOCALE = :current_locale); \c - - - :master_port diff --git a/src/test/regress/expected/pg16.out b/src/test/regress/expected/pg16.out index a172c54c0..00a082c13 100644 --- a/src/test/regress/expected/pg16.out +++ b/src/test/regress/expected/pg16.out @@ -1,13 +1,6 @@ -- -- PG16 -- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset -\if :server_version_ge_16 -\else -\q -\endif CREATE SCHEMA pg16; SET search_path TO pg16; SET citus.next_shard_id TO 950000; diff --git a/src/test/regress/expected/pg16_0.out b/src/test/regress/expected/pg16_0.out deleted file mode 100644 index 730c916ca..000000000 --- a/src/test/regress/expected/pg16_0.out +++ /dev/null @@ -1,9 +0,0 @@ --- --- PG16 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset -\if :server_version_ge_16 -\else -\q diff --git a/src/test/regress/expected/publication_0.out b/src/test/regress/expected/publication_0.out deleted file mode 100644 index e768a1d41..000000000 --- a/src/test/regress/expected/publication_0.out +++ /dev/null @@ -1,276 +0,0 @@ -CREATE SCHEMA publication; -CREATE SCHEMA "publication-1"; -SET search_path TO publication; -SET citus.shard_replication_factor TO 1; -CREATE OR REPLACE FUNCTION activate_node_snapshot() - RETURNS text[] - LANGUAGE C STRICT - AS 'citus'; -COMMENT ON FUNCTION 
activate_node_snapshot() - IS 'commands to activate node snapshot'; -\c - - - :worker_1_port -SET citus.enable_ddl_propagation TO off; -CREATE OR REPLACE FUNCTION activate_node_snapshot() - RETURNS text[] - LANGUAGE C STRICT - AS 'citus'; -COMMENT ON FUNCTION activate_node_snapshot() - IS 'commands to activate node snapshot'; -\c - - - :worker_2_port -SET citus.enable_ddl_propagation TO off; -CREATE OR REPLACE FUNCTION activate_node_snapshot() - RETURNS text[] - LANGUAGE C STRICT - AS 'citus'; -COMMENT ON FUNCTION activate_node_snapshot() - IS 'commands to activate node snapshot'; --- create some publications with conflicting names on worker node --- publication will be different from coordinator -CREATE PUBLICATION "pub-all"; --- publication will be same as coordinator -CREATE PUBLICATION "pub-all-insertupdateonly" FOR ALL TABLES WITH (publish = 'insert, update');; -\c - - - :master_port -SET search_path TO publication; -SET citus.shard_replication_factor TO 1; --- do not create publications on worker 2 initially -SELECT citus_remove_node('localhost', :worker_2_port); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - --- create a non-distributed publication -SET citus.enable_ddl_propagation TO off; -CREATE PUBLICATION pubnotdistributed WITH (publish = 'delete'); -RESET citus.enable_ddl_propagation; -ALTER PUBLICATION pubnotdistributed SET (publish = 'truncate'); --- create regular, distributed publications -CREATE PUBLICATION pubempty; -CREATE PUBLICATION pubinsertonly WITH (publish = 'insert'); -CREATE PUBLICATION "pub-all" FOR ALL TABLES; -CREATE PUBLICATION "pub-all-insertupdateonly" FOR ALL TABLES WITH (publish = 'insert, update'); --- add worker 2 with publications -SELECT 1 FROM citus_add_node('localhost', :worker_2_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - --- Check publications on all the nodes, if we see the same publication name twice then its definition differs --- Note that publications are special in the sense that the coordinator object might differ from --- worker objects due to the presence of regular tables. 
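
Editor's note: the query below exercises a snapshot-diffing idiom that recurs throughout this file. activate_node_snapshot() (the test-local UDF defined above) returns the DDL commands a node would replay during metadata sync, so grepping that list per worker shows how an object was propagated. A minimal sketch of the idiom, assuming a metadata-synced cluster and using "pub-all" purely as an example filter:

    -- collect, per worker, the CREATE PUBLICATION commands the node would replay
    SELECT unnest(result::text[]) AS ddl
    FROM run_command_on_workers($$
        SELECT array_agg(c) FROM (
            SELECT c FROM unnest(activate_node_snapshot()) c
            WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pub-all%'
            ORDER BY 1) s $$);

Each worker row carries its matching commands as a text array; unnesting them on the coordinator makes per-node definition differences visible as distinct rows, which is exactly what the SELECT DISTINCT wrapper below relies on.
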
-SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' ORDER BY 1) s$$) - ORDER BY c) s; - c ---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION "pub-all" FOR ALL TABLES WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')'); - SELECT worker_create_or_replace_object('CREATE PUBLICATION "pub-all-insertupdateonly" FOR ALL TABLES WITH (publish_via_partition_root = ''false'', publish = ''insert, update'')'); - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubempty WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')'); - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubinsertonly WITH (publish_via_partition_root = ''false'', publish = ''insert'')'); -(4 rows) - -CREATE TABLE test (x int primary key, y int, "column-1" int, doc xml); -CREATE TABLE "test-pubs" (x int primary key, y int, "column-1" int); -CREATE TABLE "publication-1"."test-pubs" (x int primary key, y int, "column-1" int); --- various operations on a publication with only local tables -CREATE PUBLICATION pubtables_orig FOR TABLE test, "test-pubs", "publication-1"."test-pubs" WITH (publish = 'insert, truncate'); -ALTER PUBLICATION pubtables_orig DROP TABLE test; -ALTER PUBLICATION pubtables_orig ADD TABLE test; --- publication will be empty on worker nodes, since all tables are local -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubtables%' ORDER BY 1) s$$) - ORDER BY c) s; - c ---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubtables_orig WITH (publish_via_partition_root = ''false'', publish = ''insert, truncate'')'); -(1 row) - --- distribute a table and create a tenant schema, creating a mixed publication -SELECT create_distributed_table('test','x', colocate_with := 'none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SET citus.enable_schema_based_sharding TO ON; -CREATE SCHEMA citus_schema_1; -CREATE TABLE citus_schema_1.test (x int primary key, y int, "column-1" int, doc xml); -SET citus.enable_schema_based_sharding TO OFF; -ALTER PUBLICATION pubtables_orig ADD TABLE citus_schema_1.test; --- some generic operations -ALTER PUBLICATION pubtables_orig RENAME TO pubtables; -ALTER PUBLICATION pubtables SET (publish = 'insert, update, delete'); -ALTER PUBLICATION pubtables OWNER TO postgres; -ALTER PUBLICATION pubtables SET (publish = 'inert, update, delete'); -ERROR: unrecognized "publish" value: "inert" -ALTER PUBLICATION pubtables ADD TABLE notexist; -ERROR: relation "notexist" does not exist --- operations with a distributed table -ALTER PUBLICATION pubtables DROP TABLE test; -ALTER PUBLICATION pubtables ADD TABLE test; -ALTER PUBLICATION pubtables SET TABLE test, "test-pubs", "publication-1"."test-pubs", citus_schema_1.test; --- operations with a tenant schema table -ALTER PUBLICATION pubtables DROP TABLE citus_schema_1.test; -ALTER PUBLICATION pubtables ADD TABLE citus_schema_1.test; -ALTER PUBLICATION pubtables SET TABLE test, "test-pubs", "publication-1"."test-pubs", 
citus_schema_1.test; --- operations with a local table in a mixed publication -ALTER PUBLICATION pubtables DROP TABLE "test-pubs"; -ALTER PUBLICATION pubtables ADD TABLE "test-pubs"; -SELECT create_distributed_table('"test-pubs"', 'x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- test and test-pubs will show up in worker nodes -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubtables%' ORDER BY 1) s$$) - ORDER BY c) s; - c ---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubtables FOR TABLE publication.test, citus_schema_1.test, publication."test-pubs" WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete'')'); -(1 row) - --- operations with a strangely named distributed table in a mixed publication -ALTER PUBLICATION pubtables DROP TABLE "test-pubs"; -ALTER PUBLICATION pubtables ADD TABLE "test-pubs"; --- create a publication with distributed and local tables -DROP PUBLICATION pubtables; -CREATE PUBLICATION pubtables FOR TABLE test, "test-pubs", "publication-1"."test-pubs", citus_schema_1.test; --- change distributed tables -SELECT alter_distributed_table('test', shard_count := 5, cascade_to_colocated := true); -NOTICE: creating a new table for publication.test -NOTICE: moving the data of publication.test -NOTICE: dropping the old publication.test -NOTICE: renaming the new table to publication.test -NOTICE: creating a new table for publication."test-pubs" -NOTICE: moving the data of publication."test-pubs" -NOTICE: dropping the old publication."test-pubs" -NOTICE: renaming the new table to publication."test-pubs" - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT undistribute_table('test'); -NOTICE: creating a new table for publication.test -NOTICE: moving the data of publication.test -NOTICE: dropping the old publication.test -NOTICE: renaming the new table to publication.test - undistribute_table ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_add_local_table_to_metadata('test'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table_concurrently('test', 'x'); - create_distributed_table_concurrently ---------------------------------------------------------------------- - -(1 row) - -SELECT undistribute_table('"test-pubs"'); -NOTICE: creating a new table for publication."test-pubs" -NOTICE: moving the data of publication."test-pubs" -NOTICE: dropping the old publication."test-pubs" -NOTICE: renaming the new table to publication."test-pubs" - undistribute_table ---------------------------------------------------------------------- - -(1 row) - -SELECT create_reference_table('"test-pubs"'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - --- publications are unchanged despite various tranformations -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubtables%' ORDER BY 1) s$$) - ORDER BY c) s; - c 
---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubtables FOR TABLE citus_schema_1.test, publication.test, publication."test-pubs" WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')'); -(1 row) - --- partitioned table -CREATE TABLE testpub_partitioned (a int, b text, c text) PARTITION BY RANGE (a); -CREATE TABLE testpub_partitioned_0 PARTITION OF testpub_partitioned FOR VALUES FROM (1) TO (10); -ALTER TABLE testpub_partitioned_0 ADD PRIMARY KEY (a); -ALTER TABLE testpub_partitioned_0 REPLICA IDENTITY USING INDEX testpub_partitioned_0_pkey; -CREATE TABLE testpub_partitioned_1 PARTITION OF testpub_partitioned FOR VALUES FROM (11) TO (20); -ALTER TABLE testpub_partitioned_1 ADD PRIMARY KEY (a); -ALTER TABLE testpub_partitioned_1 REPLICA IDENTITY USING INDEX testpub_partitioned_1_pkey; -CREATE PUBLICATION pubpartitioned FOR TABLE testpub_partitioned WITH (publish_via_partition_root = 'true'); -SELECT create_distributed_table('testpub_partitioned', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubpartitioned%' ORDER BY 1) s$$) - ORDER BY c) s; - c ---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubpartitioned FOR TABLE publication.testpub_partitioned WITH (publish_via_partition_root = ''true'', publish = ''insert, update, delete, truncate'')'); -(1 row) - -DROP PUBLICATION pubpartitioned; -CREATE PUBLICATION pubpartitioned FOR TABLE testpub_partitioned WITH (publish_via_partition_root = 'true'); --- add a partition -ALTER PUBLICATION pubpartitioned ADD TABLE testpub_partitioned_1; -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLIATION%' AND c LIKE '%pubpartitioned%' ORDER BY 1) s$$) - ORDER BY c) s; -ERROR: malformed array literal: "" -DETAIL: Array value must start with "{" or dimension information. 
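
Editor's note: the "malformed array literal" error recorded above stems from the 'CREATE PUBLIATION' typo in the LIKE pattern (preserved here exactly as the old expected output recorded it): nothing matches, array_agg over zero rows returns NULL rather than an empty array, run_command_on_workers hands that back as an empty result string, and casting '' to text[] fails. A standalone reproduction of the two PostgreSQL behaviors involved, independent of Citus:

    -- array_agg over zero input rows yields NULL, not '{}'
    SELECT array_agg(x) FROM generate_series(1, 0) AS x;
    -- casting an empty string to an array type raises the recorded error
    SELECT ''::text[];  -- ERROR: malformed array literal: ""
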
--- make sure we can sync all the publication metadata -SELECT start_metadata_sync_to_all_nodes(); - start_metadata_sync_to_all_nodes ---------------------------------------------------------------------- - t -(1 row) - -DROP PUBLICATION pubempty; -DROP PUBLICATION pubtables; -DROP PUBLICATION pubinsertonly; -DROP PUBLICATION "pub-all-insertupdateonly"; -DROP PUBLICATION "pub-all"; -DROP PUBLICATION pubpartitioned; -DROP PUBLICATION pubnotdistributed; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -SET client_min_messages TO ERROR; -DROP SCHEMA publication CASCADE; -DROP SCHEMA "publication-1" CASCADE; -DROP SCHEMA citus_schema_1 CASCADE; -SELECT public.wait_for_resource_cleanup(); - wait_for_resource_cleanup ---------------------------------------------------------------------- - -(1 row) - -\q diff --git a/src/test/regress/expected/single_node_0.out b/src/test/regress/expected/single_node_0.out deleted file mode 100644 index a94c02951..000000000 --- a/src/test/regress/expected/single_node_0.out +++ /dev/null @@ -1,2582 +0,0 @@ --- --- SINGLE_NODE --- --- This test file has an alternative output because of the change in the --- display of SQL-standard function's arguments in INSERT/SELECT in PG15. --- The alternative output can be deleted when we drop support for PG14 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15; - server_version_ge_15 ---------------------------------------------------------------------- - f -(1 row) - -CREATE SCHEMA single_node; -SET search_path TO single_node; -SET citus.shard_count TO 4; -SET citus.shard_replication_factor TO 1; -SET citus.next_shard_id TO 90630500; --- Ensure tuple data in explain analyze output is the same on all PG versions -SET citus.enable_binary_protocol = TRUE; --- do not cache any connections for now, will enable it back soon -ALTER SYSTEM SET citus.max_cached_conns_per_worker TO 0; --- adding the coordinator as inactive is disallowed -SELECT 1 FROM master_add_inactive_node('localhost', :master_port, groupid => 0); -ERROR: coordinator node cannot be added as inactive node --- before adding a node we are not officially a coordinator -SELECT citus_is_coordinator(); - citus_is_coordinator ---------------------------------------------------------------------- - f -(1 row) - --- idempotently add node to allow this test to run without add_coordinator -SET client_min_messages TO WARNING; -SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - --- after adding a node we are officially a coordinator -SELECT citus_is_coordinator(); - citus_is_coordinator ---------------------------------------------------------------------- - t -(1 row) - --- coordinator cannot be disabled -SELECT 1 FROM citus_disable_node('localhost', :master_port); -ERROR: cannot change "isactive" field of the coordinator node -RESET client_min_messages; -SELECT 1 FROM master_remove_node('localhost', :master_port); - ?column? 
---------------------------------------------------------------------- - 1 -(1 row) - -SELECT count(*) FROM pg_dist_node; - count ---------------------------------------------------------------------- - 0 -(1 row) - --- there are no workers now, but we should still be able to create Citus tables --- force local execution when creating the index -ALTER SYSTEM SET citus.local_shared_pool_size TO -1; --- Postmaster might not ack SIGHUP signal sent by pg_reload_conf() immediately, --- so we need to sleep for some amount of time to do our best to ensure that --- postmaster reflects GUC changes. -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(0.1); - pg_sleep ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE failover_to_local (a int); -SELECT create_distributed_table('failover_to_local', 'a', shard_count=>32); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE INDEX CONCURRENTLY ON failover_to_local(a); -WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. -If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then re-attempt the original command. -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: Consider using a higher value for max_connections --- reset global GUC changes -ALTER SYSTEM RESET citus.local_shared_pool_size; -ALTER SYSTEM RESET citus.max_cached_conns_per_worker; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -CREATE TABLE single_node_nullkey_c1(a int, b int); -SELECT create_distributed_table('single_node_nullkey_c1', null, colocate_with=>'none', distribution_type=>null); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE single_node_nullkey_c2(a int, b int); -SELECT create_distributed_table('single_node_nullkey_c2', null, colocate_with=>'none', distribution_type=>null); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- created on different colocation groups .. -SELECT -( - SELECT colocationid FROM pg_dist_partition - WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass -) -!= -( - SELECT colocationid FROM pg_dist_partition - WHERE logicalrelid = 'single_node.single_node_nullkey_c2'::regclass -); - ?column? ---------------------------------------------------------------------- - t -(1 row) - --- .. but both are associated to coordinator -SELECT groupid = 0 FROM pg_dist_placement -WHERE shardid = ( - SELECT shardid FROM pg_dist_shard - WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass -); - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT groupid = 0 FROM pg_dist_placement -WHERE shardid = ( - SELECT shardid FROM pg_dist_shard - WHERE logicalrelid = 'single_node.single_node_nullkey_c2'::regclass -); - ?column? 
---------------------------------------------------------------------- - t -(1 row) - --- try creating a single-shard table from a shard relation -SELECT shardid AS round_robin_test_c1_shard_id FROM pg_dist_shard WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass \gset -SELECT create_distributed_table('single_node_nullkey_c1_' || :round_robin_test_c1_shard_id , null, colocate_with=>'none', distribution_type=>null); -ERROR: relation "single_node_nullkey_c1_90630532" is a shard relation --- create a tenant schema on single node setup -SET citus.enable_schema_based_sharding TO ON; -CREATE SCHEMA tenant_1; -CREATE TABLE tenant_1.tbl_1 (a int); --- verify that we recorded tenant_1 in pg_dist_schema -SELECT COUNT(*)=1 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'tenant_1'; - ?column? ---------------------------------------------------------------------- - t -(1 row) - --- verify that tenant_1.tbl_1 is recorded in pg_dist_partition, as a single-shard table -SELECT COUNT(*)=1 FROM pg_dist_partition -WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND - partmethod = 'n' AND repmodel = 's' AND colocationid IS NOT NULL; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -RESET citus.enable_schema_based_sharding; --- Test lazy conversion from Citus local to single-shard tables --- and reference tables, on single node. This means that no shard --- replication should be needed. -CREATE TABLE ref_table_conversion_test ( - a int PRIMARY KEY -); -SELECT citus_add_local_table_to_metadata('ref_table_conversion_test'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - --- save old shardid and placementid -SELECT get_shard_id_for_distribution_column('single_node.ref_table_conversion_test') AS ref_table_conversion_test_old_shard_id \gset -SELECT placementid AS ref_table_conversion_test_old_coord_placement_id FROM pg_dist_placement WHERE shardid = :ref_table_conversion_test_old_shard_id \gset -SELECT create_reference_table('ref_table_conversion_test'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -SELECT public.verify_pg_dist_partition_for_reference_table('single_node.ref_table_conversion_test'); - verify_pg_dist_partition_for_reference_table ---------------------------------------------------------------------- - t -(1 row) - -SELECT public.verify_shard_placements_for_reference_table('single_node.ref_table_conversion_test', - :ref_table_conversion_test_old_shard_id, - :ref_table_conversion_test_old_coord_placement_id); - verify_shard_placements_for_reference_table ---------------------------------------------------------------------- - t -(1 row) - -CREATE TABLE single_shard_conversion_test_1 ( - int_col_1 int PRIMARY KEY, - text_col_1 text UNIQUE, - int_col_2 int -); -SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_1'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - --- save old shardid -SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_1') AS single_shard_conversion_test_1_old_shard_id \gset -SELECT create_distributed_table('single_shard_conversion_test_1', null, colocate_with=>'none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT 
public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_1'); - verify_pg_dist_partition_for_single_shard_table ---------------------------------------------------------------------- - t -(1 row) - -SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_1', :single_shard_conversion_test_1_old_shard_id, true); - verify_shard_placement_for_single_shard_table ---------------------------------------------------------------------- - t -(1 row) - -CREATE TABLE single_shard_conversion_test_2 ( - int_col_1 int -); -SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_2'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - --- save old shardid -SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_2') AS single_shard_conversion_test_2_old_shard_id \gset -SELECT create_distributed_table('single_shard_conversion_test_2', null, colocate_with=>'none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_2'); - verify_pg_dist_partition_for_single_shard_table ---------------------------------------------------------------------- - t -(1 row) - -SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_2', :single_shard_conversion_test_2_old_shard_id, true); - verify_shard_placement_for_single_shard_table ---------------------------------------------------------------------- - t -(1 row) - --- make sure that they're created on different colocation groups -SELECT -( - SELECT colocationid FROM pg_dist_partition - WHERE logicalrelid = 'single_node.single_shard_conversion_test_1'::regclass -) -!= -( - SELECT colocationid FROM pg_dist_partition - WHERE logicalrelid = 'single_node.single_shard_conversion_test_2'::regclass -); - ?column? 
---------------------------------------------------------------------- - t -(1 row) - -SET client_min_messages TO WARNING; -DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2, ref_table_conversion_test, single_shard_conversion_test_1, single_shard_conversion_test_2; -DROP SCHEMA tenant_1 CASCADE; -RESET client_min_messages; --- so that we don't have to update rest of the test output -SET citus.next_shard_id TO 90630500; -CREATE TABLE ref(x int, y int); -SELECT create_reference_table('ref'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node; - groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced ---------------------------------------------------------------------- - 0 | localhost | 57636 | t | t | t | t -(1 row) - -DROP TABLE ref; --- remove the coordinator to try again with create_reference_table -SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0; - master_remove_node ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE loc(x int, y int); -SELECT citus_add_local_table_to_metadata('loc'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node; - groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced ---------------------------------------------------------------------- - 0 | localhost | 57636 | t | t | t | t -(1 row) - -DROP TABLE loc; --- remove the coordinator to try again with create_distributed_table -SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0; - master_remove_node ---------------------------------------------------------------------- - -(1 row) - --- verify the coordinator gets auto added with the localhost guc -ALTER SYSTEM SET citus.local_hostname TO '127.0.0.1'; --although not a hostname, should work for connecting locally -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(.1); -- wait to make sure the config has changed before running the GUC - pg_sleep ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE test(x int, y int); -SELECT create_distributed_table('test','x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node; - groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced ---------------------------------------------------------------------- - 0 | 127.0.0.1 | 57636 | t | t | t | t -(1 row) - -DROP TABLE test; --- remove the coordinator to try again -SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0; - master_remove_node ---------------------------------------------------------------------- - -(1 row) - -ALTER SYSTEM RESET citus.local_hostname; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(.1); -- wait to make sure the config has changed before running the 
GUC - pg_sleep ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE test(x int, y int); -SELECT create_distributed_table('test','x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node; - groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced ---------------------------------------------------------------------- - 0 | localhost | 57636 | t | t | t | t -(1 row) - -BEGIN; - -- we should not enable MX for this temporary node just because - -- it'd spawn a bg worker targeting this node - -- and that changes the connection count specific tests - -- here - SET LOCAL citus.enable_metadata_sync TO OFF; - -- cannot add workers with specific IP as long as I have a placeholder coordinator record - SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port); -ERROR: cannot add a worker node when the coordinator hostname is set to localhost -DETAIL: Worker nodes need to be able to connect to the coordinator to transfer data. -HINT: Use SELECT citus_set_coordinator_host('') to configure the coordinator hostname -COMMIT; -BEGIN; - -- we should not enable MX for this temporary node just because - -- it'd spawn a bg worker targeting this node - -- and that changes the connection count specific tests - -- here - SET LOCAL citus.enable_metadata_sync TO OFF; - -- adding localhost workers is ok - SELECT 1 FROM master_add_node('localhost', :worker_1_port); -NOTICE: shards are still on the coordinator after adding the new node -HINT: Use SELECT rebalance_table_shards(); to balance shards data between workers and coordinator or SELECT citus_drain_node('localhost',57636); to permanently move shards away from the coordinator. - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -COMMIT; --- we don't need this node anymore -SELECT 1 FROM master_remove_node('localhost', :worker_1_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - --- set the coordinator host to something different than localhost -SELECT 1 FROM citus_set_coordinator_host('127.0.0.1'); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -BEGIN; - -- we should not enable MX for this temporary node just because - -- it'd spawn a bg worker targeting this node - -- and that changes the connection count specific tests - -- here - SET LOCAL citus.enable_metadata_sync TO OFF; - -- adding workers with specific IP is ok now - SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port); -NOTICE: shards are still on the coordinator after adding the new node -HINT: Use SELECT rebalance_table_shards(); to balance shards data between workers and coordinator or SELECT citus_drain_node('127.0.0.1',57636); to permanently move shards away from the coordinator. - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -COMMIT; --- we don't need this node anymore -SELECT 1 FROM master_remove_node('127.0.0.1', :worker_1_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - --- set the coordinator host back to localhost for the remainder of tests -SELECT 1 FROM citus_set_coordinator_host('localhost'); - ?column? 
---------------------------------------------------------------------- - 1 -(1 row) - --- should have shards setting should not really matter for a single node -SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -CREATE TYPE new_type AS (n int, m text); -CREATE TABLE test_2(x int, y int, z new_type); -SELECT create_distributed_table('test_2','x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE ref(a int, b int); -SELECT create_reference_table('ref'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE local(c int, d int); -CREATE TABLE public.another_schema_table(a int, b int); -SELECT create_distributed_table('public.another_schema_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE non_binary_copy_test (key int PRIMARY KEY, value new_type); -SELECT create_distributed_table('non_binary_copy_test', 'key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO non_binary_copy_test SELECT i, (i, 'citus9.5')::new_type FROM generate_series(0,1000)i; --- Confirm the basics work -INSERT INTO test VALUES (1, 2), (3, 4), (5, 6), (2, 7), (4, 5); -SELECT * FROM test WHERE x = 1; - x | y ---------------------------------------------------------------------- - 1 | 2 -(1 row) - -SELECT count(*) FROM test; - count ---------------------------------------------------------------------- - 5 -(1 row) - -SELECT * FROM test ORDER BY x; - x | y ---------------------------------------------------------------------- - 1 | 2 - 2 | 7 - 3 | 4 - 4 | 5 - 5 | 6 -(5 rows) - -UPDATE test SET y = y + 1 RETURNING *; - x | y ---------------------------------------------------------------------- - 1 | 3 - 2 | 8 - 3 | 5 - 4 | 6 - 5 | 7 -(5 rows) - -WITH cte_1 AS (UPDATE test SET y = y - 1 RETURNING *) SELECT * FROM cte_1 ORDER BY 1,2; - x | y ---------------------------------------------------------------------- - 1 | 2 - 2 | 7 - 3 | 4 - 4 | 5 - 5 | 6 -(5 rows) - --- show that we can filter remote commands --- given that citus.grep_remote_commands, we log all commands -SET citus.log_local_commands to true; -SELECT count(*) FROM public.another_schema_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 0 -(1 row) - --- grep matches all commands -SET citus.grep_remote_commands TO "%%"; -SELECT count(*) FROM public.another_schema_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 0 -(1 row) - --- only filter a specific shard for the local execution -BEGIN; - SET LOCAL citus.grep_remote_commands TO "%90630515%"; - SELECT count(*) FROM public.another_schema_table; -NOTICE: executing the command locally: SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE true - count ---------------------------------------------------------------------- - 
0 -(1 row) - - -- match nothing - SET LOCAL citus.grep_remote_commands TO '%nothing%'; - SELECT count(*) FROM public.another_schema_table; - count ---------------------------------------------------------------------- - 0 -(1 row) - -COMMIT; --- only filter a specific shard for the remote execution -BEGIN; - SET LOCAL citus.enable_local_execution TO FALSE; - SET LOCAL citus.grep_remote_commands TO '%90630515%'; - SET LOCAL citus.log_remote_commands TO ON; - SELECT count(*) FROM public.another_schema_table; -NOTICE: issuing SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE true -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx - count ---------------------------------------------------------------------- - 0 -(1 row) - - -- match nothing - SET LOCAL citus.grep_remote_commands TO '%nothing%'; - SELECT count(*) FROM public.another_schema_table; - count ---------------------------------------------------------------------- - 0 -(1 row) - -COMMIT; -RESET citus.log_local_commands; -RESET citus.grep_remote_commands; --- Test upsert with constraint -CREATE TABLE upsert_test -( - part_key int UNIQUE, - other_col int, - third_col int -); --- distribute the table -SELECT create_distributed_table('upsert_test', 'part_key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- do a regular insert -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1), (2, 2) RETURNING *; - part_key | other_col | third_col ---------------------------------------------------------------------- - 1 | 1 | - 2 | 2 | -(2 rows) - -SET citus.log_remote_commands to true; --- observe that there is a conflict and the following query does nothing -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT DO NOTHING RETURNING *; -NOTICE: executing the command locally: INSERT INTO single_node.upsert_test_90630523 AS citus_table_alias (part_key, other_col) VALUES (1, 1) ON CONFLICT DO NOTHING RETURNING part_key, other_col, third_col - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - --- same as the above with different syntax -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO NOTHING RETURNING *; -NOTICE: executing the command locally: INSERT INTO single_node.upsert_test_90630523 AS citus_table_alias (part_key, other_col) VALUES (1, 1) ON CONFLICT(part_key) DO NOTHING RETURNING part_key, other_col, third_col - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - --- again the same query with another syntax -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *; -NOTICE: executing the command locally: INSERT INTO single_node.upsert_test_90630523 AS citus_table_alias (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key_90630523 DO NOTHING RETURNING part_key, other_col, third_col - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - -BEGIN; --- force local execution -SELECT count(*) FROM upsert_test WHERE part_key = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.upsert_test_90630523 upsert_test WHERE (part_key OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 1 -(1 
row) - -SET citus.log_remote_commands to false; --- multi-shard pushdown query that goes through local execution -INSERT INTO upsert_test (part_key, other_col) SELECT part_key, other_col FROM upsert_test ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *; - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - --- multi-shard pull-to-coordinator query that goes through local execution -INSERT INTO upsert_test (part_key, other_col) SELECT part_key, other_col FROM upsert_test LIMIT 100 ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *; - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - -COMMIT; --- to test citus local tables -select undistribute_table('upsert_test'); -NOTICE: creating a new table for single_node.upsert_test -NOTICE: moving the data of single_node.upsert_test -NOTICE: dropping the old single_node.upsert_test -NOTICE: renaming the new table to single_node.upsert_test - undistribute_table ---------------------------------------------------------------------- - -(1 row) - --- create citus local table -select citus_add_local_table_to_metadata('upsert_test'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - --- test the constraint with local execution -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *; - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - -DROP TABLE upsert_test; -CREATE TABLE relation_tracking_table_1(id int, nonid int); -SELECT create_distributed_table('relation_tracking_table_1', 'id', colocate_with := 'none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO relation_tracking_table_1 select generate_series(6, 10000, 1), 0; -CREATE or REPLACE function foo() -returns setof relation_tracking_table_1 -AS $$ -BEGIN -RETURN query select * from relation_tracking_table_1 order by 1 limit 10; -end; -$$ language plpgsql; -CREATE TABLE relation_tracking_table_2 (id int, nonid int); --- use the relation-access in this session -select foo(); - foo ---------------------------------------------------------------------- - (6,0) - (7,0) - (8,0) - (9,0) - (10,0) - (11,0) - (12,0) - (13,0) - (14,0) - (15,0) -(10 rows) - --- we should be able to use sequential mode, as the previous multi-shard --- relation access has been cleaned-up -BEGIN; -SET LOCAL citus.multi_shard_modify_mode TO sequential; -INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0; -SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none'); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. 
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$single_node.relation_tracking_table_2$$) - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT count(*) FROM relation_tracking_table_2; - count ---------------------------------------------------------------------- - 995 -(1 row) - -ROLLBACK; -BEGIN; -INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0; -SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none'); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. -HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$single_node.relation_tracking_table_2$$) - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT count(*) FROM relation_tracking_table_2; - count ---------------------------------------------------------------------- - 995 -(1 row) - -COMMIT; -SET client_min_messages TO ERROR; -DROP TABLE relation_tracking_table_2, relation_tracking_table_1 CASCADE; -RESET client_min_messages; -CREATE SCHEMA "Quoed.Schema"; -SET search_path TO "Quoed.Schema"; -CREATE TABLE "long_constraint_upsert\_test" -( - part_key int, - other_col int, - third_col int, - CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" UNIQUE (part_key) -); -NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted " --- distribute the table and create shards -SELECT create_distributed_table('"long_constraint_upsert\_test"', 'part_key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO "long_constraint_upsert\_test" (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" DO NOTHING RETURNING *; -NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted " - part_key | other_col | third_col ---------------------------------------------------------------------- - 1 | 1 | -(1 row) - -ALTER TABLE "long_constraint_upsert\_test" RENAME TO simple_table_name; -INSERT INTO simple_table_name (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" DO NOTHING RETURNING *; -NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted " - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - --- this is currently not supported, but once we support --- make sure that the following query also works fine -ALTER TABLE simple_table_name RENAME CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" TO simple_constraint_name; -NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted " -ERROR: renaming constraints belonging to 
distributed tables is currently unsupported ---INSERT INTO simple_table_name (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT simple_constraint_name DO NOTHING RETURNING *; -SET search_path TO single_node; -SET client_min_messages TO ERROR; -DROP SCHEMA "Quoed.Schema" CASCADE; -RESET client_min_messages; --- test partitioned index creation with long name -CREATE TABLE test_index_creation1 -( - tenant_id integer NOT NULL, - timeperiod timestamp without time zone NOT NULL, - field1 integer NOT NULL, - inserted_utc timestamp without time zone NOT NULL DEFAULT now(), - PRIMARY KEY(tenant_id, timeperiod) -) PARTITION BY RANGE (timeperiod); -CREATE TABLE test_index_creation1_p2020_09_26 -PARTITION OF test_index_creation1 FOR VALUES FROM ('2020-09-26 00:00:00') TO ('2020-09-27 00:00:00'); -CREATE TABLE test_index_creation1_p2020_09_27 -PARTITION OF test_index_creation1 FOR VALUES FROM ('2020-09-27 00:00:00') TO ('2020-09-28 00:00:00'); -select create_distributed_table('test_index_creation1', 'tenant_id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- should be able to create indexes with INCLUDE/WHERE -CREATE INDEX ix_test_index_creation5 ON test_index_creation1 - USING btree(tenant_id, timeperiod) - INCLUDE (field1) WHERE (tenant_id = 100); --- test if indexes are created -SELECT 1 AS created WHERE EXISTS(SELECT * FROM pg_indexes WHERE indexname LIKE '%test_index_creation%'); - created ---------------------------------------------------------------------- - 1 -(1 row) - --- test citus size functions in transaction with modification -CREATE TABLE test_citus_size_func (a int); -SELECT create_distributed_table('test_citus_size_func', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO test_citus_size_func VALUES(1), (2); -BEGIN; - -- DDL with citus_table_size - ALTER TABLE test_citus_size_func ADD COLUMN newcol INT; - SELECT citus_table_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- DDL with citus_relation_size - ALTER TABLE test_citus_size_func ADD COLUMN newcol INT; - SELECT citus_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- DDL with citus_total_relation_size - ALTER TABLE test_citus_size_func ADD COLUMN newcol INT; - SELECT citus_total_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- single shard insert with citus_table_size - INSERT INTO test_citus_size_func VALUES (3); - SELECT citus_table_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- multi shard modification with citus_table_size - INSERT INTO test_citus_size_func SELECT * FROM test_citus_size_func; - SELECT citus_table_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- single shard insert with citus_relation_size - INSERT INTO test_citus_size_func VALUES (3); - SELECT citus_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks 
which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- multi shard modification with citus_relation_size - INSERT INTO test_citus_size_func SELECT * FROM test_citus_size_func; - SELECT citus_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- single shard insert with citus_total_relation_size - INSERT INTO test_citus_size_func VALUES (3); - SELECT citus_total_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- multi shard modification with citus_total_relation_size - INSERT INTO test_citus_size_func SELECT * FROM test_citus_size_func; - SELECT citus_total_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; --- we should be able to limit intermediate results -BEGIN; - SET LOCAL citus.max_intermediate_result_size TO 0; - WITH cte_1 AS (SELECT * FROM test OFFSET 0) SELECT * FROM cte_1; -ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 0 kB) -DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. -HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. -ROLLBACK; --- the first cte (cte_1) does not exceed the limit --- but the second (cte_2) exceeds, so we error out -BEGIN; - SET LOCAL citus.max_intermediate_result_size TO '1kB'; - INSERT INTO test SELECT i,i from generate_series(0,1000)i; - -- only pulls 1 row, should not hit the limit - WITH cte_1 AS (SELECT * FROM test LIMIT 1) SELECT count(*) FROM cte_1; - count ---------------------------------------------------------------------- - 1 -(1 row) - - -- cte_1 only pulls 1 row, but cte_2 all rows - WITH cte_1 AS (SELECT * FROM test LIMIT 1), - cte_2 AS (SELECT * FROM test OFFSET 0) - SELECT count(*) FROM cte_1, cte_2; -ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 1 kB) -DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. -HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. 
-ROLLBACK; --- single shard and multi-shard delete --- inside a transaction block -BEGIN; - DELETE FROM test WHERE y = 5; - INSERT INTO test VALUES (4, 5); - DELETE FROM test WHERE x = 1; - INSERT INTO test VALUES (1, 2); -COMMIT; -CREATE INDEX single_node_i1 ON test(x); -CREATE INDEX single_node_i2 ON test(x,y); -REINDEX SCHEMA single_node; -REINDEX SCHEMA CONCURRENTLY single_node; --- keep one of the indexes --- drop w/wout tx blocks -BEGIN; - DROP INDEX single_node_i2; -ROLLBACK; -DROP INDEX single_node_i2; --- change the schema w/wout TX block -BEGIN; - ALTER TABLE public.another_schema_table SET SCHEMA single_node; -ROLLBACK; -ALTER TABLE public.another_schema_table SET SCHEMA single_node; -BEGIN; - TRUNCATE test; - SELECT * FROM test; - x | y ---------------------------------------------------------------------- -(0 rows) - -ROLLBACK; -VACUUM test; -VACUUM test, test_2; -VACUUM ref, test; -VACUUM ANALYZE test(x); -ANALYZE ref; -ANALYZE test_2; -VACUUM local; -VACUUM local, ref, test, test_2; -VACUUM FULL test, ref; -BEGIN; - ALTER TABLE test ADD COLUMN z INT DEFAULT 66; - SELECT count(*) FROM test WHERE z = 66; - count ---------------------------------------------------------------------- - 5 -(1 row) - -ROLLBACK; --- explain analyze should work on a single node -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) - SELECT * FROM test; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (Citus Adaptive) (actual rows=5 loops=1) - Task Count: 4 - Tuple data received from nodes: 40 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on test_90630506 test (actual rows=2 loops=1) -(8 rows) - --- common utility command -SELECT pg_size_pretty(citus_relation_size('test'::regclass)); - pg_size_pretty ---------------------------------------------------------------------- - 24 kB -(1 row) - --- basic view queries -CREATE VIEW single_node_view AS - SELECT count(*) as cnt FROM test t1 JOIN test t2 USING (x); -SELECT * FROM single_node_view; - cnt ---------------------------------------------------------------------- - 5 -(1 row) - -SELECT * FROM single_node_view, test WHERE test.x = single_node_view.cnt; - cnt | x | y ---------------------------------------------------------------------- - 5 | 5 | 6 -(1 row) - --- copy in/out -BEGIN; - COPY test(x) FROM PROGRAM 'seq 32'; - SELECT count(*) FROM test; - count ---------------------------------------------------------------------- - 37 -(1 row) - - COPY (SELECT count(DISTINCT x) FROM test) TO STDOUT; -32 - INSERT INTO test SELECT i,i FROM generate_series(0,100)i; -ROLLBACK; --- master_create_empty_shard on coordinator -BEGIN; -CREATE TABLE append_table (a INT, b INT); -SELECT create_distributed_table('append_table','a','append'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT master_create_empty_shard('append_table'); -NOTICE: Creating placements for the append partitioned tables on the coordinator is not supported, skipping coordinator ... 
-ERROR: could only create 0 of 1 of required shard replicas -END; --- alter table inside a tx block -BEGIN; - ALTER TABLE test ADD COLUMN z single_node.new_type; - INSERT INTO test VALUES (99, 100, (1, 'onder')::new_type) RETURNING *; - x | y | z ---------------------------------------------------------------------- - 99 | 100 | (1,onder) -(1 row) - -ROLLBACK; --- prepared statements with custom types -PREPARE single_node_prepare_p1(int, int, new_type) AS - INSERT INTO test_2 VALUES ($1, $2, $3); -EXECUTE single_node_prepare_p1(1, 1, (95, 'citus9.5')::new_type); -EXECUTE single_node_prepare_p1(2 ,2, (94, 'citus9.4')::new_type); -EXECUTE single_node_prepare_p1(3 ,2, (93, 'citus9.3')::new_type); -EXECUTE single_node_prepare_p1(4 ,2, (92, 'citus9.2')::new_type); -EXECUTE single_node_prepare_p1(5 ,2, (91, 'citus9.1')::new_type); -EXECUTE single_node_prepare_p1(6 ,2, (90, 'citus9.0')::new_type); -PREPARE use_local_query_cache(int) AS SELECT count(*) FROM test_2 WHERE x = $1; -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -SET client_min_messages TO DEBUG2; --- the 6th execution will go through the planner --- the 7th execution will skip the planner as it uses the cache -EXECUTE use_local_query_cache(1); -DEBUG: Deferred pruning for a fast-path router query -DEBUG: Creating router plan - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -RESET client_min_messages; --- partitioned table should be fine, adding for completeness -CREATE TABLE collections_list ( - key bigint, - ts timestamptz DEFAULT now(), - collection_id integer, - value numeric, - PRIMARY KEY(key, collection_id) -) PARTITION BY LIST (collection_id ); -SELECT create_distributed_table('collections_list', 'key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE collections_list_0 - PARTITION OF collections_list (key, ts, collection_id, value) - FOR VALUES IN ( 0 ); -CREATE TABLE collections_list_1 - PARTITION OF collections_list (key, ts, collection_id, value) - FOR VALUES IN ( 1 ); -INSERT INTO collections_list SELECT i, '2011-01-01', i % 2, i * i FROM generate_series(0, 100) i; -SELECT count(*) FROM collections_list WHERE key < 10 AND collection_id = 1; - count ---------------------------------------------------------------------- - 5 -(1 row) - -SELECT count(*) FROM collections_list_0 WHERE key < 10 AND collection_id = 1; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT count(*) FROM collections_list_1 WHERE key = 11; - count ---------------------------------------------------------------------- - 1 -(1 row) - -ALTER TABLE collections_list DROP COLUMN ts; -SELECT * FROM collections_list, collections_list_0 WHERE 
collections_list.key=collections_list_0.key ORDER BY 1 DESC,2 DESC,3 DESC,4 DESC LIMIT 1; - key | collection_id | value | key | collection_id | value ---------------------------------------------------------------------- - 100 | 0 | 10000 | 100 | 0 | 10000 -(1 row) - --- test hash distribution using INSERT with generate_series() function -CREATE OR REPLACE FUNCTION part_hashint4_noop(value int4, seed int8) -RETURNS int8 AS $$ -SELECT value + seed; -$$ LANGUAGE SQL IMMUTABLE; -CREATE OPERATOR CLASS part_test_int4_ops -FOR TYPE int4 -USING HASH AS -operator 1 =, -function 2 part_hashint4_noop(int4, int8); -CREATE TABLE hash_parted ( - a int, - b int -) PARTITION BY HASH (a part_test_int4_ops); -CREATE TABLE hpart0 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 0); -CREATE TABLE hpart1 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 1); -CREATE TABLE hpart2 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 2); -CREATE TABLE hpart3 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 3); --- Disable metadata sync since citus doesn't support distributing --- operator class for now. -SET citus.enable_metadata_sync TO OFF; -SELECT create_distributed_table('hash_parted ', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO hash_parted VALUES (1, generate_series(1, 10)); -SELECT * FROM hash_parted ORDER BY 1, 2; - a | b ---------------------------------------------------------------------- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 -(10 rows) - -ALTER TABLE hash_parted DETACH PARTITION hpart0; -ALTER TABLE hash_parted DETACH PARTITION hpart1; -ALTER TABLE hash_parted DETACH PARTITION hpart2; -ALTER TABLE hash_parted DETACH PARTITION hpart3; -RESET citus.enable_metadata_sync; --- test range partition without creating partitions and inserting with generate_series() --- should error out even in plain PG since no partition of relation "parent_tab" is found for row --- in Citus it errors out because it fails to evaluate partition key in insert -CREATE TABLE parent_tab (id int) PARTITION BY RANGE (id); -SELECT create_distributed_table('parent_tab', 'id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO parent_tab VALUES (generate_series(0, 3)); -ERROR: failed to evaluate partition key in insert -HINT: try using constant values for partition column --- now it should work -CREATE TABLE parent_tab_1_2 PARTITION OF parent_tab FOR VALUES FROM (1) to (2); -ALTER TABLE parent_tab ADD COLUMN b int; -INSERT INTO parent_tab VALUES (1, generate_series(0, 3)); -SELECT * FROM parent_tab ORDER BY 1, 2; - id | b ---------------------------------------------------------------------- - 1 | 0 - 1 | 1 - 1 | 2 - 1 | 3 -(4 rows) - --- make sure that parallel accesses are good -SET citus.force_max_query_parallelization TO ON; -SELECT * FROM test_2 ORDER BY 1 DESC; - x | y | z ---------------------------------------------------------------------- - 6 | 2 | (90,citus9.0) - 5 | 2 | (91,citus9.1) - 4 | 2 | (92,citus9.2) - 3 | 2 | (93,citus9.3) - 2 | 2 | (94,citus9.4) - 1 | 1 | (95,citus9.5) -(6 rows) - -DELETE FROM test_2 WHERE y = 1000 RETURNING *; - x | y | z ---------------------------------------------------------------------- -(0 rows) - -RESET citus.force_max_query_parallelization ; -BEGIN; - INSERT INTO test_2 VALUES (7 ,2, (83, 'citus8.3')::new_type); - SAVEPOINT s1; - INSERT INTO 
test_2 VALUES (9 ,1, (82, 'citus8.2')::new_type); - SAVEPOINT s2; - ROLLBACK TO SAVEPOINT s1; - SELECT * FROM test_2 WHERE z = (83, 'citus8.3')::new_type OR z = (82, 'citus8.2')::new_type; - x | y | z ---------------------------------------------------------------------- - 7 | 2 | (83,citus8.3) -(1 row) - - RELEASE SAVEPOINT s1; -COMMIT; -SELECT * FROM test_2 WHERE z = (83, 'citus8.3')::new_type OR z = (82, 'citus8.2')::new_type; - x | y | z ---------------------------------------------------------------------- - 7 | 2 | (83,citus8.3) -(1 row) - --- final query is only intermediate result --- we want PG 11/12/13 behave consistently, the CTEs should be MATERIALIZED -WITH cte_1 AS (SELECT * FROM test_2) SELECT * FROM cte_1 ORDER BY 1,2; - x | y | z ---------------------------------------------------------------------- - 1 | 1 | (95,citus9.5) - 2 | 2 | (94,citus9.4) - 3 | 2 | (93,citus9.3) - 4 | 2 | (92,citus9.2) - 5 | 2 | (91,citus9.1) - 6 | 2 | (90,citus9.0) - 7 | 2 | (83,citus8.3) -(7 rows) - --- final query is router query -WITH cte_1 AS (SELECT * FROM test_2) SELECT * FROM cte_1, test_2 WHERE test_2.x = cte_1.x AND test_2.x = 7 ORDER BY 1,2; - x | y | z | x | y | z ---------------------------------------------------------------------- - 7 | 2 | (83,citus8.3) | 7 | 2 | (83,citus8.3) -(1 row) - --- final query is a distributed query -WITH cte_1 AS (SELECT * FROM test_2) SELECT * FROM cte_1, test_2 WHERE test_2.x = cte_1.x AND test_2.y != 2 ORDER BY 1,2; - x | y | z | x | y | z ---------------------------------------------------------------------- - 1 | 1 | (95,citus9.5) | 1 | 1 | (95,citus9.5) -(1 row) - --- query pushdown should work -SELECT - * -FROM - (SELECT x, count(*) FROM test_2 GROUP BY x) as foo, - (SELECT x, count(*) FROM test_2 GROUP BY x) as bar -WHERE - foo.x = bar.x -ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC -LIMIT 1; - x | count | x | count ---------------------------------------------------------------------- - 7 | 1 | 7 | 1 -(1 row) - --- make sure that foreign keys work fine -ALTER TABLE test_2 ADD CONSTRAINT first_pkey PRIMARY KEY (x); -ALTER TABLE test ADD CONSTRAINT foreign_key FOREIGN KEY (x) REFERENCES test_2(x) ON DELETE CASCADE; --- show that delete on test_2 cascades to test -SELECT * FROM test WHERE x = 5; - x | y ---------------------------------------------------------------------- - 5 | 6 -(1 row) - -DELETE FROM test_2 WHERE x = 5; -SELECT * FROM test WHERE x = 5; - x | y ---------------------------------------------------------------------- -(0 rows) - -INSERT INTO test_2 VALUES (5 ,2, (91, 'citus9.1')::new_type); -INSERT INTO test VALUES (5, 6); -INSERT INTO ref VALUES (1, 2), (5, 6), (7, 8); -SELECT count(*) FROM ref; - count ---------------------------------------------------------------------- - 3 -(1 row) - -SELECT * FROM ref ORDER BY a; - a | b ---------------------------------------------------------------------- - 1 | 2 - 5 | 6 - 7 | 8 -(3 rows) - -SELECT * FROM test, ref WHERE x = a ORDER BY x; - x | y | a | b ---------------------------------------------------------------------- - 1 | 2 | 1 | 2 - 5 | 6 | 5 | 6 -(2 rows) - -INSERT INTO local VALUES (1, 2), (3, 4), (7, 8); -SELECT count(*) FROM local; - count ---------------------------------------------------------------------- - 3 -(1 row) - -SELECT * FROM local ORDER BY c; - c | d ---------------------------------------------------------------------- - 1 | 2 - 3 | 4 - 7 | 8 -(3 rows) - -SELECT * FROM ref, local WHERE a = c ORDER BY a; - a | b | c | d 
---------------------------------------------------------------------- - 1 | 2 | 1 | 2 - 7 | 8 | 7 | 8 -(2 rows) - --- Check repartition joins are supported -SET citus.enable_repartition_joins TO ON; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET citus.enable_single_hash_repartition_joins TO ON; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET search_path TO public; -SET citus.enable_single_hash_repartition_joins TO OFF; -SELECT * FROM single_node.test t1, single_node.test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET citus.enable_single_hash_repartition_joins TO ON; -SELECT * FROM single_node.test t1, single_node.test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET search_path TO single_node; -SET citus.task_assignment_policy TO 'round-robin'; -SET citus.enable_single_hash_repartition_joins TO ON; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET citus.task_assignment_policy TO 'greedy'; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET citus.task_assignment_policy TO 'first-replica'; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -RESET citus.enable_repartition_joins; -RESET citus.enable_single_hash_repartition_joins; --- INSERT SELECT router -BEGIN; -INSERT INTO test(x, y) SELECT x, y FROM test WHERE x = 1; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- INSERT SELECT pushdown -BEGIN; -INSERT INTO test(x, y) SELECT x, y FROM test; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 10 -(1 row) - -ROLLBACK; --- INSERT SELECT analytical query -BEGIN; -INSERT INTO test(x, y) SELECT count(x), max(y) FROM test; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- INSERT SELECT repartition -BEGIN; -INSERT INTO test(x, y) SELECT y, x FROM test; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 10 -(1 row) - -ROLLBACK; --- INSERT SELECT from reference table into distributed -BEGIN; -INSERT INTO test(x, y) SELECT a, b FROM ref; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from local table into distributed -BEGIN; -INSERT INTO test(x, y) SELECT c, d FROM local; -SELECT count(*) from test; - count 
---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO ref(a, b) SELECT x, y FROM test; -SELECT count(*) from ref; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO ref(a, b) SELECT c, d FROM local; -SELECT count(*) from ref; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO local(c, d) SELECT x, y FROM test; -SELECT count(*) from local; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO local(c, d) SELECT a, b FROM ref; -SELECT count(*) from local; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- Confirm that dummy placements work -SELECT count(*) FROM test WHERE false; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y); - count ---------------------------------------------------------------------- -(0 rows) - --- Confirm that they work with round-robin task assignment policy -SET citus.task_assignment_policy TO 'round-robin'; -SELECT count(*) FROM test WHERE false; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y); - count ---------------------------------------------------------------------- -(0 rows) - -RESET citus.task_assignment_policy; -SELECT count(*) FROM test; - count ---------------------------------------------------------------------- - 5 -(1 row) - --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO ref(a, b) SELECT x, y FROM test; -SELECT count(*) from ref; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO ref(a, b) SELECT c, d FROM local; -SELECT count(*) from ref; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO local(c, d) SELECT x, y FROM test; -SELECT count(*) from local; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO local(c, d) SELECT a, b FROM ref; -SELECT count(*) from local; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- query fails on the shards should be handled --- nicely -SELECT x/0 FROM test; -ERROR: division by zero -CONTEXT: while executing command on localhost:xxxxx --- Add "fake" pg_dist_transaction records and run recovery --- to show that it is recovered --- Temporarily disable automatic 2PC recovery -ALTER SYSTEM SET citus.recover_2pc_interval TO -1; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -BEGIN; -CREATE TABLE should_commit (value int); -PREPARE TRANSACTION 'citus_0_should_commit'; --- zero is the 
coordinator's group id, so we can hard code it -INSERT INTO pg_dist_transaction VALUES (0, 'citus_0_should_commit'); -SELECT recover_prepared_transactions(); - recover_prepared_transactions ---------------------------------------------------------------------- - 1 -(1 row) - --- the table should be seen -SELECT * FROM should_commit; - value ---------------------------------------------------------------------- -(0 rows) - --- set the original back -ALTER SYSTEM RESET citus.recover_2pc_interval; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -RESET citus.task_executor_type; --- make sure undistribute table works fine -ALTER TABLE test DROP CONSTRAINT foreign_key; -SELECT undistribute_table('test_2'); -NOTICE: creating a new table for single_node.test_2 -NOTICE: moving the data of single_node.test_2 -NOTICE: dropping the old single_node.test_2 -NOTICE: renaming the new table to single_node.test_2 - undistribute_table ---------------------------------------------------------------------- - -(1 row) - -SELECT * FROM pg_dist_partition WHERE logicalrelid = 'test_2'::regclass; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted ---------------------------------------------------------------------- -(0 rows) - -CREATE TABLE reference_table_1 (col_1 INT UNIQUE, col_2 INT UNIQUE, UNIQUE (col_2, col_1)); -SELECT create_reference_table('reference_table_1'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE distributed_table_1 (col_1 INT UNIQUE); -SELECT create_distributed_table('distributed_table_1', 'col_1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE citus_local_table_1 (col_1 INT UNIQUE); -SELECT citus_add_local_table_to_metadata('citus_local_table_1'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE partitioned_table_1 (col_1 INT UNIQUE, col_2 INT) PARTITION BY RANGE (col_1); -CREATE TABLE partitioned_table_1_100_200 PARTITION OF partitioned_table_1 FOR VALUES FROM (100) TO (200); -CREATE TABLE partitioned_table_1_200_300 PARTITION OF partitioned_table_1 FOR VALUES FROM (200) TO (300); -SELECT create_distributed_table('partitioned_table_1', 'col_1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -ALTER TABLE citus_local_table_1 ADD CONSTRAINT fkey_1 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_2); -ALTER TABLE reference_table_1 ADD CONSTRAINT fkey_2 FOREIGN KEY (col_2) REFERENCES reference_table_1(col_1); -ALTER TABLE distributed_table_1 ADD CONSTRAINT fkey_3 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_1); -ALTER TABLE citus_local_table_1 ADD CONSTRAINT fkey_4 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_2); -ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_5 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_2); -SELECT undistribute_table('partitioned_table_1', cascade_via_foreign_keys=>true); -NOTICE: converting the partitions of single_node.partitioned_table_1 -NOTICE: creating a new table for single_node.partitioned_table_1 -NOTICE: dropping the old single_node.partitioned_table_1 -NOTICE: renaming the new table to single_node.partitioned_table_1 -NOTICE: creating a new table for single_node.reference_table_1 -NOTICE: moving the data of 
single_node.reference_table_1 -NOTICE: dropping the old single_node.reference_table_1 -NOTICE: renaming the new table to single_node.reference_table_1 -NOTICE: creating a new table for single_node.distributed_table_1 -NOTICE: moving the data of single_node.distributed_table_1 -NOTICE: dropping the old single_node.distributed_table_1 -NOTICE: renaming the new table to single_node.distributed_table_1 -NOTICE: creating a new table for single_node.citus_local_table_1 -NOTICE: moving the data of single_node.citus_local_table_1 -NOTICE: dropping the old single_node.citus_local_table_1 -NOTICE: renaming the new table to single_node.citus_local_table_1 -NOTICE: creating a new table for single_node.partitioned_table_1_100_200 -NOTICE: moving the data of single_node.partitioned_table_1_100_200 -NOTICE: dropping the old single_node.partitioned_table_1_100_200 -NOTICE: renaming the new table to single_node.partitioned_table_1_100_200 -NOTICE: creating a new table for single_node.partitioned_table_1_200_300 -NOTICE: moving the data of single_node.partitioned_table_1_200_300 -NOTICE: dropping the old single_node.partitioned_table_1_200_300 -NOTICE: renaming the new table to single_node.partitioned_table_1_200_300 - undistribute_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE local_table_1 (col_1 INT UNIQUE); -CREATE TABLE local_table_2 (col_1 INT UNIQUE); -CREATE TABLE local_table_3 (col_1 INT UNIQUE); -ALTER TABLE local_table_2 ADD CONSTRAINT fkey_6 FOREIGN KEY (col_1) REFERENCES local_table_1(col_1); -ALTER TABLE local_table_3 ADD CONSTRAINT fkey_7 FOREIGN KEY (col_1) REFERENCES local_table_1(col_1); -ALTER TABLE local_table_1 ADD CONSTRAINT fkey_8 FOREIGN KEY (col_1) REFERENCES local_table_1(col_1); -SELECT citus_add_local_table_to_metadata('local_table_2', cascade_via_foreign_keys=>true); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -CREATE PROCEDURE call_delegation(x int) LANGUAGE plpgsql AS $$ -BEGIN - INSERT INTO test (x) VALUES ($1); -END;$$; -SELECT * FROM pg_dist_node; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------------------------------------------------------------------- - 5 | 0 | localhost | 57636 | default | t | t | primary | default | t | t -(1 row) - -SELECT create_distributed_function('call_delegation(int)', '$1', 'test'); - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - -CREATE FUNCTION function_delegation(int) RETURNS void AS $$ -BEGIN -UPDATE test SET y = y + 1 WHERE x < $1; -END; -$$ LANGUAGE plpgsql; -SELECT create_distributed_function('function_delegation(int)', '$1', 'test'); - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - -SET client_min_messages TO DEBUG1; -CALL call_delegation(1); -DEBUG: not pushing down procedure to the same node -SELECT function_delegation(1); -DEBUG: not pushing down function to the same node - function_delegation ---------------------------------------------------------------------- - -(1 row) - -SET client_min_messages TO WARNING; -DROP TABLE test CASCADE; -CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_client_backend_count() - RETURNS bigint - LANGUAGE C STRICT - AS 'citus', $$get_all_active_client_backend_count$$; --- set the cached connections to zero --- and execute a distributed query 
so that --- we end up with zero cached connections afterwards -ALTER SYSTEM SET citus.max_cached_conns_per_worker TO 0; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - --- disable deadlock detection and re-trigger 2PC recovery --- once more when citus.max_cached_conns_per_worker is zero --- so that we can be sure that the connections established for --- maintanince daemon is closed properly. --- this is to prevent random failures in the tests (otherwise, we --- might see connections established for this operations) -ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1; -ALTER SYSTEM SET citus.recover_2pc_interval TO '1ms'; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(0.1); - pg_sleep ---------------------------------------------------------------------- - -(1 row) - --- now that last 2PC recovery is done, we're good to disable it -ALTER SYSTEM SET citus.recover_2pc_interval TO '-1'; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - --- test alter_distributed_table UDF -CREATE TABLE adt_table (a INT, b INT); -CREATE TABLE adt_col (a INT UNIQUE, b INT); -CREATE TABLE adt_ref (a INT REFERENCES adt_col(a)); -SELECT create_distributed_table('adt_table', 'a', colocate_with:='none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table('adt_col', 'a', colocate_with:='adt_table'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table('adt_ref', 'a', colocate_with:='adt_table'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO adt_table VALUES (1, 2), (3, 4), (5, 6); -INSERT INTO adt_col VALUES (3, 4), (5, 6), (7, 8); -INSERT INTO adt_ref VALUES (3), (5); -SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'adt%'; - table_name | citus_table_type | distribution_column | shard_count ---------------------------------------------------------------------- - adt_col | distributed | a | 4 - adt_ref | distributed | a | 4 - adt_table | distributed | a | 4 -(3 rows) - -SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables WHERE table_name::text LIKE 'adt%' GROUP BY colocation_id ORDER BY 1; - Colocation Groups ---------------------------------------------------------------------- - adt_col, adt_ref, adt_table -(1 row) - -SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint - WHERE (conrelid::regclass::text = 'adt_col' OR confrelid::regclass::text = 'adt_col') ORDER BY 1; - Referencing Table | Definition ---------------------------------------------------------------------- - adt_col | UNIQUE (a) - adt_ref | FOREIGN KEY (a) REFERENCES adt_col(a) -(2 rows) - -SELECT alter_distributed_table('adt_table', shard_count:=6, cascade_to_colocated:=true); - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'adt%'; - table_name | 
citus_table_type | distribution_column | shard_count ---------------------------------------------------------------------- - adt_col | distributed | a | 6 - adt_ref | distributed | a | 6 - adt_table | distributed | a | 6 -(3 rows) - -SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables WHERE table_name::text LIKE 'adt%' GROUP BY colocation_id ORDER BY 1; - Colocation Groups ---------------------------------------------------------------------- - adt_col, adt_ref, adt_table -(1 row) - -SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint - WHERE (conrelid::regclass::text = 'adt_col' OR confrelid::regclass::text = 'adt_col') ORDER BY 1; - Referencing Table | Definition ---------------------------------------------------------------------- - adt_col | UNIQUE (a) - adt_ref | FOREIGN KEY (a) REFERENCES adt_col(a) -(2 rows) - -SELECT alter_distributed_table('adt_table', distribution_column:='b', colocate_with:='none'); - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'adt%'; - table_name | citus_table_type | distribution_column | shard_count ---------------------------------------------------------------------- - adt_col | distributed | a | 6 - adt_ref | distributed | a | 6 - adt_table | distributed | b | 6 -(3 rows) - -SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables WHERE table_name::text LIKE 'adt%' GROUP BY colocation_id ORDER BY 1; - Colocation Groups ---------------------------------------------------------------------- - adt_col, adt_ref - adt_table -(2 rows) - -SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint - WHERE (conrelid::regclass::text = 'adt_col' OR confrelid::regclass::text = 'adt_col') ORDER BY 1; - Referencing Table | Definition ---------------------------------------------------------------------- - adt_col | UNIQUE (a) - adt_ref | FOREIGN KEY (a) REFERENCES adt_col(a) -(2 rows) - -SELECT * FROM adt_table ORDER BY 1; - a | b ---------------------------------------------------------------------- - 1 | 2 - 3 | 4 - 5 | 6 -(3 rows) - -SELECT * FROM adt_col ORDER BY 1; - a | b ---------------------------------------------------------------------- - 3 | 4 - 5 | 6 - 7 | 8 -(3 rows) - -SELECT * FROM adt_ref ORDER BY 1; - a ---------------------------------------------------------------------- - 3 - 5 -(2 rows) - --- make sure that COPY (e.g., INSERT .. 
SELECT) and --- alter_distributed_table works in the same TX -BEGIN; -SET LOCAL citus.enable_local_execution=OFF; -INSERT INTO adt_table SELECT x, x+1 FROM generate_series(1, 1000) x; -SELECT alter_distributed_table('adt_table', distribution_column:='a'); - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -ROLLBACK; -BEGIN; -INSERT INTO adt_table SELECT x, x+1 FROM generate_series(1, 1000) x; -SELECT alter_distributed_table('adt_table', distribution_column:='a'); - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT COUNT(*) FROM adt_table; - count ---------------------------------------------------------------------- - 1003 -(1 row) - -END; -SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text = 'adt_table'; - table_name | citus_table_type | distribution_column | shard_count ---------------------------------------------------------------------- - adt_table | distributed | a | 6 -(1 row) - -\c - - - :master_port --- sometimes Postgres is a little slow to terminate the backends --- even if PQfinish is sent. So, to prevent any flaky tests, sleep -SELECT pg_sleep(0.1); - pg_sleep ---------------------------------------------------------------------- - -(1 row) - --- since max_cached_conns_per_worker == 0 at this point, the --- backend(s) that execute on the shards will be terminated --- so show that there are no internal backends -SET search_path TO single_node; -SET citus.next_shard_id TO 90730500; -SELECT count(*) from should_commit; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'citus_internal%'; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT get_all_active_client_backend_count(); - get_all_active_client_backend_count ---------------------------------------------------------------------- - 1 -(1 row) - -BEGIN; - SET LOCAL citus.shard_count TO 32; - SET LOCAL citus.force_max_query_parallelization TO ON; - SET LOCAL citus.enable_local_execution TO false; - CREATE TABLE test (a int); - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('test', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - - SELECT count(*) FROM test; - count ---------------------------------------------------------------------- - 0 -(1 row) - - -- now, we should have additional 32 connections - SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'citus_internal%'; - count ---------------------------------------------------------------------- - 32 -(1 row) - - -- single external connection - SELECT get_all_active_client_backend_count(); - get_all_active_client_backend_count ---------------------------------------------------------------------- - 1 -(1 row) - -ROLLBACK; -\c - - - :master_port -SET search_path TO single_node; -SET citus.next_shard_id TO 90830500; --- simulate that even if there are no connection slots --- to connect, Citus can switch to local execution -SET citus.force_max_query_parallelization TO false; -SET citus.log_remote_commands TO ON; -ALTER SYSTEM SET citus.local_shared_pool_size TO -1; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(0.1); - pg_sleep
---------------------------------------------------------------------- - -(1 row) - -SET citus.executor_slow_start_interval TO 10; -SELECT count(*) from another_schema_table; -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630515 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630516 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630517 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630518 another_schema_table WHERE true - count ---------------------------------------------------------------------- - 0 -(1 row) - -UPDATE another_schema_table SET b = b; -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = b --- INSERT .. SELECT pushdown and INSERT .. SELECT via repartitioning --- note that we ignore INSERT .. SELECT via coordinator as it relies on --- COPY command -INSERT INTO another_schema_table SELECT * FROM another_schema_table; -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE (a IS NOT NULL) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE (a IS NOT NULL) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE (a IS NOT NULL) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE (a IS NOT NULL) -INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table; -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630515_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630515_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630516_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630516_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the
command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630517_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630517_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630518_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630518_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 --- multi-row INSERTs -INSERT INTO another_schema_table VALUES (1,1), (2,2), (3,3), (4,4), (5,5),(6,6),(7,7); -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) VALUES (1,1), (5,5) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) VALUES (3,3), (4,4), (7,7) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) VALUES (6,6) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) VALUES (2,2) --- INSERT..SELECT with re-partitioning when using local execution -BEGIN; -INSERT INTO another_schema_table VALUES (1,100); -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 (a, b) VALUES (1, 100) -INSERT INTO another_schema_table VALUES (2,100); -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 (a, b) VALUES (2, 100) -INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table; -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630515_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630515_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630516_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630516_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630517_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630517_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing 
the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630518_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630518_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630516_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_2,repartitioned_results_xxxxx_from_90630517_to_2,repartitioned_results_xxxxx_from_90630518_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630518_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) -SELECT * FROM another_schema_table WHERE a = 100 ORDER BY b; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 100) ORDER BY b - a | b ---------------------------------------------------------------------- - 100 | 1 - 100 | 2 -(2 rows) - -ROLLBACK; --- intermediate results -WITH cte_1 AS (SELECT * FROM another_schema_table LIMIT 1000) - SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 - count ---------------------------------------------------------------------- - 7 -(1 row) - --- this is to get ready for the next tests -TRUNCATE another_schema_table; -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: 
executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE --- copy can use local execution even if there is no connection available -COPY another_schema_table(a) FROM PROGRAM 'seq 32'; -NOTICE: executing the copy locally for shard xxxxx -CONTEXT: COPY another_schema_table, line 1: "1" -NOTICE: executing the copy locally for shard xxxxx -CONTEXT: COPY another_schema_table, line 2: "2" -NOTICE: executing the copy locally for shard xxxxx -CONTEXT: COPY another_schema_table, line 3: "3" -NOTICE: executing the copy locally for shard xxxxx -CONTEXT: COPY another_schema_table, line 6: "6" --- INSERT .. SELECT with co-located intermediate results -SET citus.log_remote_commands to false; -CREATE UNIQUE INDEX another_schema_table_pk ON another_schema_table(a); -SET citus.log_local_commands to true; -INSERT INTO another_schema_table SELECT * FROM another_schema_table LIMIT 10000 ON CONFLICT(a) DO NOTHING; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING -INSERT INTO another_schema_table SELECT * FROM another_schema_table ORDER BY a LIMIT 10 ON CONFLICT(a) DO UPDATE SET b = EXCLUDED.b + 1 RETURNING *; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b 
FROM single_node.another_schema_table_90630517 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b - a | b ---------------------------------------------------------------------- - 1 | - 2 | - 3 | - 4 | - 5 | - 6 | - 7 | - 8 | - 9 | - 10 | -(10 rows) - --- INSERT .. 
SELECT with co-located intermediate result for non-binary input -WITH cte_1 AS -(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING value) -SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(value single_node.new_type)) cte_1 - count ---------------------------------------------------------------------- - 1001 -(1 row) - --- test with NULL columns -ALTER TABLE non_binary_copy_test ADD COLUMN z INT; -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630519, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630520, 'single_node', 'ALTER TABLE 
non_binary_copy_test ADD COLUMN z integer;') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630521, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630522, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;') -WITH cte_1 AS -(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING z) -SELECT bool_and(z is null) FROM cte_1; -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: SELECT bool_and((z IS NULL)) AS bool_and FROM (SELECT intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(z integer)) cte_1 - bool_and ---------------------------------------------------------------------- - 
t -(1 row) - --- test with type coercion (int -> text) and also NULL values with coercion -WITH cte_1 AS -(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING key, z) -SELECT count(DISTINCT key::text), count(DISTINCT z::text) FROM cte_1; -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z -NOTICE: executing the command locally: SELECT count(DISTINCT (key)::text) AS count, count(DISTINCT (z)::text) AS count FROM (SELECT intermediate_result.key, intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, z integer)) cte_1 - count | count ---------------------------------------------------------------------- - 1001 | 0 -(1 row) - --- test disabling drop and truncate for known
shards -SET citus.shard_replication_factor TO 1; -CREATE TABLE test_disabling_drop_and_truncate (a int); -SELECT create_distributed_table('test_disabling_drop_and_truncate', 'a'); -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830500, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830500, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830501, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830501, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830502, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830502, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830503, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830503, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres') - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SET citus.enable_manual_changes_to_shards TO off; --- these should error out -DROP TABLE test_disabling_drop_and_truncate_90830500; -ERROR: cannot modify "test_disabling_drop_and_truncate_90830500" because it is a shard of a distributed table -HINT: Use the distributed table or set citus.enable_manual_changes_to_shards to on to modify shards directly -TRUNCATE TABLE test_disabling_drop_and_truncate_90830500; -ERROR: cannot modify "test_disabling_drop_and_truncate_90830500" because it is a shard of a distributed table -HINT: Use the distributed table or set citus.enable_manual_changes_to_shards to on to modify shards directly -RESET citus.enable_manual_changes_to_shards ; --- these should work as expected -TRUNCATE TABLE test_disabling_drop_and_truncate_90830500; -DROP TABLE test_disabling_drop_and_truncate_90830500; -DROP TABLE test_disabling_drop_and_truncate; --- test creating distributed or reference tables from shards -CREATE TABLE test_creating_distributed_relation_table_from_shard (a int); -SELECT create_distributed_table('test_creating_distributed_relation_table_from_shard', 'a'); -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830504, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830504, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830505, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830505, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830506, 
'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830506, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830507, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830507, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- these should error because shards cannot be used to: --- create distributed table -SELECT create_distributed_table('test_creating_distributed_relation_table_from_shard_90830504', 'a'); -ERROR: relation "test_creating_distributed_relation_table_from_shard_90830504" is a shard relation --- create reference table -SELECT create_reference_table('test_creating_distributed_relation_table_from_shard_90830504'); -ERROR: relation "test_creating_distributed_relation_table_from_shard_90830504" is a shard relation -RESET citus.shard_replication_factor; -DROP TABLE test_creating_distributed_relation_table_from_shard; --- let's flush the copy often to make sure everything is fine -SET citus.local_copy_flush_threshold TO 1; -TRUNCATE another_schema_table; -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -INSERT INTO another_schema_table(a) SELECT i from generate_Series(0,10000)i; -NOTICE: executing the copy locally for shard xxxxx -NOTICE: executing the copy locally for shard xxxxx -NOTICE: executing the copy locally for shard xxxxx -NOTICE: executing the copy locally for shard xxxxx -WITH cte_1 AS -(INSERT INTO another_schema_table SELECT * FROM another_schema_table ORDER BY a LIMIT 10000 ON CONFLICT(a) DO NOTHING RETURNING *) -SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format)
intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 - count ---------------------------------------------------------------------- - 0 -(1 row) - -WITH cte_1 AS -(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING z) -SELECT bool_and(z is null) FROM cte_1; -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: 
INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: SELECT bool_and((z IS NULL)) AS bool_and FROM (SELECT intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(z integer)) cte_1 - bool_and ---------------------------------------------------------------------- - t -(1 row) - -RESET citus.local_copy_flush_threshold; -CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC() -RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus', -$$coordinated_transaction_should_use_2PC$$; --- a multi-shard/single-shard select that is failed over to local --- execution doesn't start a 2PC -BEGIN; - SELECT count(*) FROM another_schema_table; -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630515 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630516 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630517 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630518 another_schema_table WHERE true - count ---------------------------------------------------------------------- - 10001 -(1 row) - - SELECT count(*) FROM another_schema_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630515 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 1 -(1 row) - - WITH cte_1 as (SELECT * FROM another_schema_table LIMIT 10) - SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 - count
---------------------------------------------------------------------- - 10 -(1 row) - - WITH cte_1 as (SELECT * FROM another_schema_table WHERE a = 1 LIMIT 10) - SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT another_schema_table.a, another_schema_table.b FROM single_node.another_schema_table_90630515 another_schema_table WHERE (another_schema_table.a OPERATOR(pg_catalog.=) 1) LIMIT 10) cte_1 - count ---------------------------------------------------------------------- - 1 -(1 row) - - SELECT coordinated_transaction_should_use_2PC(); - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - f -(1 row) - -ROLLBACK; --- same without a transaction block -WITH cte_1 AS (SELECT count(*) as cnt FROM another_schema_table LIMIT 1000), - cte_2 AS (SELECT coordinated_transaction_should_use_2PC() as enabled_2pc) -SELECT cnt, enabled_2pc FROM cte_1, cte_2; -NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT cte_1.cnt, cte_2.enabled_2pc FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) cte_1, (SELECT intermediate_result.enabled_2pc FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(enabled_2pc boolean)) cte_2 - cnt | enabled_2pc ---------------------------------------------------------------------- - 10001 | f -(1 row) - --- a multi-shard modification that is failed over to local --- execution starts a 2PC -BEGIN; - UPDATE another_schema_table SET b = b + 1; -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) - SELECT coordinated_transaction_should_use_2PC(); - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - t -(1 row) - -ROLLBACK; --- a multi-shard modification that is failed over to local --- execution starts a 2PC -BEGIN; - WITH cte_1 AS (UPDATE another_schema_table SET b = b + 1 RETURNING *) - SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) 
RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 - count ---------------------------------------------------------------------- - 10001 -(1 row) - - SELECT coordinated_transaction_should_use_2PC(); - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - t -(1 row) - -ROLLBACK; --- same without transaction block -WITH cte_1 AS (UPDATE another_schema_table SET b = b + 1 RETURNING *) -SELECT coordinated_transaction_should_use_2PC(); -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: SELECT single_node.coordinated_transaction_should_use_2pc() AS coordinated_transaction_should_use_2pc - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - t -(1 row) - --- a single-shard modification that is failed over to local --- starts 2PC execution -BEGIN; - UPDATE another_schema_table SET b = b + 1 WHERE a = 1; -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) WHERE (a OPERATOR(pg_catalog.=) 1) - SELECT coordinated_transaction_should_use_2PC(); - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - t -(1 row) - -ROLLBACK; --- if the local execution is disabled, we cannot failover to --- local execution and the queries would fail -SET citus.enable_local_execution TO false; -SELECT count(*) from another_schema_table; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; -UPDATE another_schema_table SET b = b; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; -INSERT INTO another_schema_table SELECT * FROM another_schema_table; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. 
Consider enabling local execution using SET citus.enable_local_execution TO true; -INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; -WITH cte_1 AS (SELECT * FROM another_schema_table LIMIT 1000) - SELECT count(*) FROM cte_1; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; -INSERT INTO another_schema_table VALUES (1,1), (2,2), (3,3), (4,4), (5,5),(6,6),(7,7); -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; --- copy fails if local execution is disabled and there is no connection slot -COPY another_schema_table(a) FROM PROGRAM 'seq 32'; -ERROR: could not find an available connection -HINT: Set citus.max_shared_pool_size TO -1 to let COPY command finish -CONTEXT: COPY another_schema_table, line 1: "1" --- set the values back to the originals -ALTER SYSTEM RESET citus.max_cached_conns_per_worker; -ALTER SYSTEM RESET citus.distributed_deadlock_detection_factor; -ALTER SYSTEM RESET citus.recover_2pc_interval; -ALTER SYSTEM RESET citus.local_shared_pool_size; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - --- suppress notices -SET client_min_messages TO error; --- cannot remove coordinator since a reference table exists on coordinator and no other worker nodes are added -SELECT 1 FROM master_remove_node('localhost', :master_port); -ERROR: cannot remove or disable the node localhost:xxxxx because it contains the only shard placement for shard xxxxx -DETAIL: One of the table(s) that prevents the operation from completing successfully is single_node.ref -HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables --- Cleanup -DROP SCHEMA single_node CASCADE; --- Remove the coordinator again -SELECT 1 FROM master_remove_node('localhost', :master_port); - ?column?
---------------------------------------------------------------------- - 1 -(1 row) - --- restart nodeid sequence so that multi_cluster_management still has the same --- nodeids -ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 1; diff --git a/src/test/regress/sql/alter_table_set_access_method.sql b/src/test/regress/sql/alter_table_set_access_method.sql index b9e214dab..0109f4d24 100644 --- a/src/test/regress/sql/alter_table_set_access_method.sql +++ b/src/test/regress/sql/alter_table_set_access_method.sql @@ -266,16 +266,8 @@ create table events (event_id bigserial, event_time timestamptz default now(), p create index on events (event_id); insert into events (payload) select 'hello-'||s from generate_series(1,10) s; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset - BEGIN; - \if :server_version_ge_16 SET LOCAL debug_parallel_query = regress; - \else - SET LOCAL force_parallel_mode = regress; - \endif SET LOCAL min_parallel_table_scan_size = 1; SET LOCAL parallel_tuple_cost = 0; SET LOCAL max_parallel_workers = 4; diff --git a/src/test/regress/sql/columnar_fallback_scan.sql b/src/test/regress/sql/columnar_fallback_scan.sql index 93a701062..f79af130e 100644 --- a/src/test/regress/sql/columnar_fallback_scan.sql +++ b/src/test/regress/sql/columnar_fallback_scan.sql @@ -20,16 +20,7 @@ select count(*), min(i), max(i), avg(i) from fallback_scan; -- Negative test: try to force a parallel plan with at least two -- workers, but columnar should reject it and use a non-parallel scan. -- - -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset - -\if :server_version_ge_16 set debug_parallel_query = regress; -\else -set force_parallel_mode = regress; -\endif set min_parallel_table_scan_size = 1; set parallel_tuple_cost = 0; set max_parallel_workers = 4; @@ -37,11 +28,7 @@ set max_parallel_workers_per_gather = 4; explain (costs off) select count(*), min(i), max(i), avg(i) from fallback_scan; select count(*), min(i), max(i), avg(i) from fallback_scan; -\if :server_version_ge_16 set debug_parallel_query = default; -\else -set force_parallel_mode = default; -\endif set min_parallel_table_scan_size to default; set parallel_tuple_cost to default; set max_parallel_workers to default; diff --git a/src/test/regress/sql/columnar_indexes.sql b/src/test/regress/sql/columnar_indexes.sql index 6e54b8591..dd9b70d1d 100644 --- a/src/test/regress/sql/columnar_indexes.sql +++ b/src/test/regress/sql/columnar_indexes.sql @@ -414,10 +414,6 @@ BEGIN; -- this wouldn't flush any data insert into events (payload) select 'hello-'||s from generate_series(1, 10) s; - SHOW server_version \gset - SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 - \gset - -- Since table is large enough, normally postgres would prefer using -- parallel workers when building the index. -- @@ -430,11 +426,7 @@ BEGIN; -- following command to fail since we prevent using parallel workers for -- columnar tables.
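Context for the parallel-plan hunks above and below: PostgreSQL 16 renamed the force_parallel_mode GUC to debug_parallel_query, which is why these tests carried a server_version_ge_16 guard. With PG15 support dropped, PG16 is the minimum supported server and the guard collapses to its PG16+ branch. A minimal sketch of the psql pattern these hunks delete, shown against what remains (illustrative only, using the same \gset/\if machinery as the tests themselves):

-- before: detect the server major version, then pick the matching GUC name
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
SET debug_parallel_query = regress;
\else
SET force_parallel_mode = regress;
\endif

-- after: PG16+ is guaranteed, so the renamed GUC can be set directly
SET debug_parallel_query = regress;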
- \if :server_version_ge_16 SET LOCAL debug_parallel_query = regress; - \else - SET LOCAL force_parallel_mode = regress; - \endif SET LOCAL min_parallel_table_scan_size = 1; SET LOCAL parallel_tuple_cost = 0; SET LOCAL max_parallel_workers = 4; diff --git a/src/test/regress/sql/columnar_partitioning.sql b/src/test/regress/sql/columnar_partitioning.sql index 8e91b8919..f8a49f438 100644 --- a/src/test/regress/sql/columnar_partitioning.sql +++ b/src/test/regress/sql/columnar_partitioning.sql @@ -24,16 +24,9 @@ INSERT INTO parent SELECT '2020-03-15', 30, 300, 'three thousand' INSERT INTO parent SELECT '2020-04-15', 30, 300, 'three thousand' FROM generate_series(1,100000); -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset - -- run parallel plans -\if :server_version_ge_16 SET debug_parallel_query = regress; -\else -SET force_parallel_mode = regress; -\endif + SET min_parallel_table_scan_size = 1; SET parallel_tuple_cost = 0; SET max_parallel_workers = 4; @@ -57,11 +50,8 @@ EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent; SELECT count(*), sum(i), min(i), max(i) FROM parent; SET columnar.enable_custom_scan TO DEFAULT; -\if :server_version_ge_16 SET debug_parallel_query TO DEFAULT; -\else -SET force_parallel_mode TO DEFAULT; -\endif + SET min_parallel_table_scan_size TO DEFAULT; SET parallel_tuple_cost TO DEFAULT; SET max_parallel_workers TO DEFAULT; diff --git a/src/test/regress/sql/create_drop_database_propagation_pg15.sql b/src/test/regress/sql/create_drop_database_propagation_pg15.sql index 40d1b9e09..4f57f9112 100644 --- a/src/test/regress/sql/create_drop_database_propagation_pg15.sql +++ b/src/test/regress/sql/create_drop_database_propagation_pg15.sql @@ -1,14 +1,3 @@ --- --- PG15 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif - -- create/drop database for pg >= 15 set citus.enable_create_database_propagation=on; diff --git a/src/test/regress/sql/create_drop_database_propagation_pg16.sql b/src/test/regress/sql/create_drop_database_propagation_pg16.sql index cec553813..bfe0862b8 100644 --- a/src/test/regress/sql/create_drop_database_propagation_pg16.sql +++ b/src/test/regress/sql/create_drop_database_propagation_pg16.sql @@ -1,14 +1,3 @@ --- --- PG16 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset -\if :server_version_ge_16 -\else -\q -\endif - -- create/drop database for pg >= 16 set citus.enable_create_database_propagation=on; diff --git a/src/test/regress/sql/merge_unsupported.sql b/src/test/regress/sql/merge_unsupported.sql index ef95e01ea..9903fd6a5 100644 --- a/src/test/regress/sql/merge_unsupported.sql +++ b/src/test/regress/sql/merge_unsupported.sql @@ -1,18 +1,9 @@ - - SHOW server_version \gset SELECT CASE WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' - WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14' ELSE 'Unsupported version' END AS version_category; -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif -- -- MERGE test from PG community (adapted to Citus by converting all tables to Citus local) diff --git a/src/test/regress/sql/metadata_sync_helpers.sql 
b/src/test/regress/sql/metadata_sync_helpers.sql index 9e67fa337..6482cdfb3 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -798,21 +798,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- with an ugly trick, update the vartype of table from int to bigint -- so that making two tables colocated fails - -- include varnullingrels for PG16+ - SHOW server_version \gset - SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 - \gset -- include varreturningtype for PG18+ SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset \if :server_version_ge_18 UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varreturningtype 0 :varnoold 1 :varoattno 1 :location -1}' WHERE logicalrelid = 'test_2'::regclass; - \elif :server_version_ge_16 - UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' - WHERE logicalrelid = 'test_2'::regclass; \else - UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' + UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' WHERE logicalrelid = 'test_2'::regclass; \endif diff --git a/src/test/regress/sql/multi_complex_count_distinct.sql b/src/test/regress/sql/multi_complex_count_distinct.sql index 0e06fc0c8..d2024607a 100644 --- a/src/test/regress/sql/multi_complex_count_distinct.sql +++ b/src/test/regress/sql/multi_complex_count_distinct.sql @@ -1,14 +1,6 @@ -- -- COMPLEX_COUNT_DISTINCT -- --- This test file has an alternative output because of the following in PG16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 --- The alternative output can be deleted when we drop support for PG15 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; - SET citus.next_shard_id TO 240000; SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 1; diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql index 1c4841c6f..b22c5c200 100644 --- a/src/test/regress/sql/multi_explain.sql +++ b/src/test/regress/sql/multi_explain.sql @@ -1,17 +1,11 @@ -- -- MULTI_EXPLAIN -- --- This test file has an alternative output because of the following in PG16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 --- The alternative output can be deleted when we drop support for PG15 --- -- This test file has an alternative output because of the following in PG18: -- https://github.com/postgres/postgres/commit/161320b4b960ee4fe918959be6529ae9b106ea5a -- The alternative output can be deleted when we drop support for PG17 -- SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18; SET citus.next_shard_id TO 570000; diff --git a/src/test/regress/sql/multi_mx_create_table.sql 
b/src/test/regress/sql/multi_mx_create_table.sql index 1a267b301..a6ad30997 100644 --- a/src/test/regress/sql/multi_mx_create_table.sql +++ b/src/test/regress/sql/multi_mx_create_table.sql @@ -59,8 +59,6 @@ CREATE OPERATOR citus_mx_test_schema.=== ( SET search_path TO public; SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset @@ -69,12 +67,10 @@ SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 -- Relevant PG commit: -- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset -\elif :server_version_ge_16 +\else -- In PG16, read-only server settings lc_collate and lc_ctype are removed -- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset -\else -SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset \endif CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale); diff --git a/src/test/regress/sql/multi_mx_hide_shard_names.sql b/src/test/regress/sql/multi_mx_hide_shard_names.sql index 20068e0bb..3d1bb7ad9 100644 --- a/src/test/regress/sql/multi_mx_hide_shard_names.sql +++ b/src/test/regress/sql/multi_mx_hide_shard_names.sql @@ -1,6 +1,12 @@ -- -- Hide shard names on MX worker nodes -- +-- PostgreSQL 18 planner changes (probably AIO and updated cost model) make +-- sequential scans cheaper, so the psql `\d table`-style query that uses a +-- regex on `pg_class.relname` no longer chooses an index scan. This causes +-- a plan difference. 
+-- Alternative test output can be removed when we drop PG17 support +-- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1130000; @@ -245,10 +251,6 @@ RESET citus.enable_metadata_sync; -- the shards and indexes do not show up SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; --- PG16 added one more backend type B_STANDALONE_BACKEND --- and also alphabetized the backend types, hence the orders changed --- Relevant PG16 commit: --- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690 -- Relevant Pg17 commit: -- https://github.com/postgres/postgres/commit/067701f57758f9baed5bd9d868539738d77bfa92 -- Relevant PG18 commit: @@ -256,7 +258,6 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int >= 18 AS server_version_ge_18 \gset SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset \if :server_version_ge_18 SELECT 1 AS client_backend \gset SELECT 5 AS bgworker \gset @@ -265,14 +266,10 @@ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \g SELECT 1 AS client_backend \gset SELECT 4 AS bgworker \gset SELECT 5 AS walsender \gset -\elif :server_version_ge_16 +\else SELECT 4 AS client_backend \gset SELECT 5 AS bgworker \gset SELECT 12 AS walsender \gset -\else - SELECT 3 AS client_backend \gset - SELECT 4 AS bgworker \gset - SELECT 9 AS walsender \gset \endif -- say, we set it to bgworker diff --git a/src/test/regress/sql/multi_outer_join_columns.sql b/src/test/regress/sql/multi_outer_join_columns.sql index 8e49c4bcf..4bad9a82b 100644 --- a/src/test/regress/sql/multi_outer_join_columns.sql +++ b/src/test/regress/sql/multi_outer_join_columns.sql @@ -3,13 +3,6 @@ --- varnullingrels field of a VAR node may contain relids of join relations that can make the var --- NULL; in a rewritten distributed query without a join such relids do not have a meaning. 
--- This test has an alternative goldfile because of the following feature in Postgres 16: --- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e --- - -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16; - CREATE SCHEMA outer_join_columns_testing; SET search_path to 'outer_join_columns_testing'; SET citus.next_shard_id TO 30070000; diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql index 13be94857..4876093bc 100644 --- a/src/test/regress/sql/multi_schema_support.sql +++ b/src/test/regress/sql/multi_schema_support.sql @@ -295,8 +295,6 @@ SELECT * FROM nation_hash ORDER BY 1,2,3,4; SET search_path TO public; SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 \gset @@ -305,12 +303,10 @@ SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 -- Relevant PG commit: -- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset -\elif :server_version_ge_16 +\else -- In PG16, read-only server settings lc_collate and lc_ctype are removed -- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset -\else -SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset \endif CREATE COLLATION test_schema_support.english (LOCALE = :current_locale); diff --git a/src/test/regress/sql/pg16.sql b/src/test/regress/sql/pg16.sql index bada3649c..cc46d79f2 100644 --- a/src/test/regress/sql/pg16.sql +++ b/src/test/regress/sql/pg16.sql @@ -1,14 +1,6 @@ -- -- PG16 -- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 -\gset -\if :server_version_ge_16 -\else -\q -\endif - CREATE SCHEMA pg16; SET search_path TO pg16; SET citus.next_shard_id TO 950000;