From 69af3e8509d346024bfe346196c45277aa0a409c Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Wed, 21 Jun 2023 14:18:23 +0300 Subject: [PATCH] Drop PG13 Support Phase 2 - Remove PG13 specific paths/tests (#7007) This commit is the second and last phase of dropping PG13 support. It consists of the following: - Removes all PG_VERSION_13 & PG_VERSION_14 from codepaths - Removes pg_version_compat entries and columnar_version_compat entries specific for PG13 - Removes alternative pg13 test outputs - Removes PG13 normalize lines and fix the test outputs based on that It is a continuation of https://github.com/citusdata/citus/commit/5bf163a27d42b0813110fe05452903dfa6c3db43 --- src/backend/columnar/columnar_debug.c | 2 +- src/backend/columnar/columnar_metadata.c | 19 +- src/backend/columnar/columnar_tableam.c | 35 +- .../commands/citus_global_signal.c | 12 - src/backend/distributed/commands/cluster.c | 8 - .../distributed/commands/dependencies.c | 12 +- .../commands/distribute_object_ops.c | 2 +- src/backend/distributed/commands/domain.c | 4 - src/backend/distributed/commands/function.c | 6 +- .../distributed/commands/local_multi_copy.c | 8 +- src/backend/distributed/commands/multi_copy.c | 166 +- src/backend/distributed/commands/sequence.c | 12 +- src/backend/distributed/commands/table.c | 37 +- src/backend/distributed/commands/type.c | 4 +- .../distributed/commands/utility_hook.c | 84 +- src/backend/distributed/commands/vacuum.c | 18 +- src/backend/distributed/commands/view.c | 2 +- .../connection/connection_management.c | 27 - .../distributed/deparser/citus_ruleutils.c | 21 +- .../deparser/deparse_sequence_stmts.c | 8 +- .../deparser/deparse_statistics_stmts.c | 31 - .../deparser/deparse_table_stmts.c | 4 +- .../distributed/deparser/deparse_type_stmts.c | 4 +- .../distributed/deparser/qualify_domain.c | 4 - .../deparser/qualify_sequence_stmt.c | 4 +- .../distributed/deparser/qualify_type_stmt.c | 2 +- 
.../distributed/deparser/ruleutils_13.c | 8131 ----------------- .../distributed/executor/adaptive_executor.c | 4 - .../distributed/executor/multi_executor.c | 6 +- .../distributed/executor/query_stats.c | 4 - src/backend/distributed/metadata/dependency.c | 11 +- src/backend/distributed/metadata/distobject.c | 8 +- .../distributed/metadata/metadata_sync.c | 8 +- .../distributed/metadata/metadata_utility.c | 4 - .../operations/worker_node_manager.c | 4 - .../operations/worker_shard_copy.c | 14 +- .../planner/insert_select_planner.c | 4 +- .../distributed/planner/multi_explain.c | 10 +- .../planner/multi_logical_optimizer.c | 13 +- .../planner/multi_router_planner.c | 11 - .../relation_restriction_equivalence.c | 4 +- .../replication/multi_logical_replication.c | 2 +- src/backend/distributed/shared_library_init.c | 4 - src/backend/distributed/test/fake_am.c | 19 - src/backend/distributed/test/xact_stats.c | 4 +- .../distributed/transaction/backend_data.c | 4 - .../distributed/transaction/lock_graph.c | 2 +- .../distributed/utils/background_jobs.c | 2 - src/backend/distributed/utils/citus_clauses.c | 6 +- src/backend/distributed/utils/enable_ssl.c | 5 - .../distributed/utils/function_utils.c | 2 +- src/backend/distributed/utils/listutils.c | 2 - src/backend/distributed/utils/log_utils.c | 2 - .../utils/multi_partitioning_utils.c | 8 +- .../columnar/columnar_version_compat.h | 23 - src/include/distributed/commands/multi_copy.h | 5 - .../distributed/commands/utility_hook.h | 2 - .../distributed/connection_management.h | 3 - .../distributed/pg_version_constants.h | 1 - src/include/pg_version_compat.h | 64 +- src/test/regress/bin/normalize.sed | 38 +- src/test/regress/citus_tests/run_test.py | 1 + .../background_task_queue_monitor.out | 8 +- src/test/regress/expected/cpu_priority.out | 43 +- .../regress/expected/generated_identity.out | 8 - .../regress/expected/generated_identity_0.out | 431 - .../expected/grant_on_schema_propagation.out | 4 +- 
.../grant_on_schema_propagation_0.out | 4 +- .../isolation_master_update_node_1.out | 66 - .../expected/local_shard_execution.out | 16 +- .../expected/local_shard_execution_0.out | 16 +- .../local_shard_execution_replicated.out | 16 +- .../local_shard_execution_replicated_0.out | 16 +- .../multi_alter_table_row_level_security.out | 3 +- src/test/regress/expected/multi_explain.out | 48 + .../regress/expected/multi_metadata_sync.out | 2 +- .../expected/multi_metadata_sync_0.out | 2 +- .../regress/expected/multi_mx_explain.out | 96 + src/test/regress/expected/pg14.out | 7 - src/test/regress/expected/pg14_0.out | 6 - src/test/regress/expected/sql_procedure.out | 4 +- src/test/regress/expected/stat_statements.out | 15 - src/test/regress/expected/tableam.out | 2 +- .../regress/expected/window_functions.out | 2 - .../regress/expected/window_functions_0.out | 1657 ---- .../spec/isolation_master_update_node.spec | 3 +- src/test/regress/sql/cpu_priority.sql | 3 - src/test/regress/sql/generated_identity.sql | 4 - .../sql/grant_on_schema_propagation.sql | 5 +- src/test/regress/sql/multi_metadata_sync.sql | 2 +- src/test/regress/sql/pg14.sql | 8 - src/test/regress/sql/stat_statements.sql | 15 - src/test/regress/sql/window_functions.sql | 2 - 93 files changed, 348 insertions(+), 11137 deletions(-) delete mode 100644 src/backend/distributed/deparser/ruleutils_13.c delete mode 100644 src/test/regress/expected/generated_identity_0.out delete mode 100644 src/test/regress/expected/isolation_master_update_node_1.out delete mode 100644 src/test/regress/expected/pg14_0.out delete mode 100644 src/test/regress/expected/window_functions_0.out diff --git a/src/backend/columnar/columnar_debug.c b/src/backend/columnar/columnar_debug.c index e6b19f768..cbb0d554f 100644 --- a/src/backend/columnar/columnar_debug.c +++ b/src/backend/columnar/columnar_debug.c @@ -159,5 +159,5 @@ MemoryContextTotals(MemoryContext context, MemoryContextCounters *counters) MemoryContextTotals(child, counters); } - 
context->methods->stats_compat(context, NULL, NULL, counters, true); + context->methods->stats(context, NULL, NULL, counters, true); } diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c index 015df65eb..7fbc96419 100644 --- a/src/backend/columnar/columnar_metadata.c +++ b/src/backend/columnar/columnar_metadata.c @@ -1623,12 +1623,8 @@ StartModifyRelation(Relation rel) { EState *estate = create_estate_for_relation(rel); -#if PG_VERSION_NUM >= PG_VERSION_14 ResultRelInfo *resultRelInfo = makeNode(ResultRelInfo); InitResultRelInfo(resultRelInfo, rel, 1, NULL, 0); -#else - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; -#endif /* ExecSimpleRelationInsert, ... require caller to open indexes */ ExecOpenIndices(resultRelInfo, false); @@ -1658,7 +1654,7 @@ InsertTupleAndEnforceConstraints(ModifyState *state, Datum *values, bool *nulls) ExecStoreHeapTuple(tuple, slot, false); /* use ExecSimpleRelationInsert to enforce constraints */ - ExecSimpleRelationInsert_compat(state->resultRelInfo, state->estate, slot); + ExecSimpleRelationInsert(state->resultRelInfo, state->estate, slot); } @@ -1689,12 +1685,8 @@ FinishModifyRelation(ModifyState *state) ExecCloseIndices(state->resultRelInfo); AfterTriggerEndQuery(state->estate); -#if PG_VERSION_NUM >= PG_VERSION_14 ExecCloseResultRelations(state->estate); ExecCloseRangeTableRelations(state->estate); -#else - ExecCleanUpTriggerState(state->estate); -#endif ExecResetTupleTable(state->estate->es_tupleTable, false); FreeExecutorState(state->estate); @@ -1723,15 +1715,6 @@ create_estate_for_relation(Relation rel) rte->rellockmode = AccessShareLock; ExecInitRangeTable(estate, list_make1(rte)); -#if PG_VERSION_NUM < PG_VERSION_14 - ResultRelInfo *resultRelInfo = makeNode(ResultRelInfo); - InitResultRelInfo(resultRelInfo, rel, 1, NULL, 0); - - estate->es_result_relations = resultRelInfo; - estate->es_num_result_relations = 1; - estate->es_result_relation_info = resultRelInfo; 
-#endif - estate->es_output_cid = GetCurrentCommandId(true); /* Prepare to catch AFTER triggers. */ diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 9fd6ba62d..4a08feb54 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -115,9 +115,7 @@ static RangeVar * ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions); static void ColumnarProcessUtility(PlannedStmt *pstmt, const char *queryString, -#if PG_VERSION_NUM >= PG_VERSION_14 bool readOnlyTree, -#endif ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, @@ -665,7 +663,6 @@ columnar_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, } -#if PG_VERSION_NUM >= PG_VERSION_14 static TransactionId columnar_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) @@ -714,19 +711,6 @@ columnar_index_delete_tuples(Relation rel, } -#else -static TransactionId -columnar_compute_xid_horizon_for_tuples(Relation rel, - ItemPointerData *tids, - int nitems) -{ - elog(ERROR, "columnar_compute_xid_horizon_for_tuples not implemented"); -} - - -#endif - - static void columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate) @@ -1484,8 +1468,7 @@ columnar_index_build_range_scan(Relation columnarRelation, if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent) { /* ignore lazy VACUUM's */ - OldestXmin = GetOldestNonRemovableTransactionId_compat(columnarRelation, - PROCARRAY_FLAGS_VACUUM); + OldestXmin = GetOldestNonRemovableTransactionId(columnarRelation); } Snapshot snapshot = { 0 }; @@ -1813,8 +1796,8 @@ ColumnarReadMissingRowsIntoIndex(TableScanDesc scan, Relation indexRelation, Relation columnarRelation = scan->rs_rd; IndexUniqueCheck indexUniqueCheck = indexInfo->ii_Unique ? 
UNIQUE_CHECK_YES : UNIQUE_CHECK_NO; - index_insert_compat(indexRelation, indexValues, indexNulls, columnarItemPointer, - columnarRelation, indexUniqueCheck, false, indexInfo); + index_insert(indexRelation, indexValues, indexNulls, columnarItemPointer, + columnarRelation, indexUniqueCheck, false, indexInfo); validateIndexState->tups_inserted += 1; } @@ -2240,21 +2223,17 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions static void ColumnarProcessUtility(PlannedStmt *pstmt, const char *queryString, -#if PG_VERSION_NUM >= PG_VERSION_14 bool readOnlyTree, -#endif ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *completionTag) { -#if PG_VERSION_NUM >= PG_VERSION_14 if (readOnlyTree) { pstmt = copyObject(pstmt); } -#endif Node *parsetree = pstmt->utilityStmt; @@ -2371,8 +2350,8 @@ ColumnarProcessUtility(PlannedStmt *pstmt, CheckCitusColumnarAlterExtensionStmt(parsetree); } - PrevProcessUtilityHook_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtilityHook(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); if (columnarOptions != NIL) { @@ -2500,11 +2479,7 @@ static const TableAmRoutine columnar_am_methods = { .tuple_get_latest_tid = columnar_get_latest_tid, .tuple_tid_valid = columnar_tuple_tid_valid, .tuple_satisfies_snapshot = columnar_tuple_satisfies_snapshot, -#if PG_VERSION_NUM >= PG_VERSION_14 .index_delete_tuples = columnar_index_delete_tuples, -#else - .compute_xid_horizon_for_tuples = columnar_compute_xid_horizon_for_tuples, -#endif .tuple_insert = columnar_tuple_insert, .tuple_insert_speculative = columnar_tuple_insert_speculative, diff --git a/src/backend/distributed/commands/citus_global_signal.c b/src/backend/distributed/commands/citus_global_signal.c index 05b210ee2..8183d6673 100644 --- a/src/backend/distributed/commands/citus_global_signal.c +++ 
b/src/backend/distributed/commands/citus_global_signal.c @@ -81,13 +81,6 @@ CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig) { Assert((sig == SIGINT) || (sig == SIGTERM)); -#if PG_VERSION_NUM < PG_VERSION_14 - if (timeout != 0) - { - elog(ERROR, "timeout parameter is only supported on Postgres 14 or later"); - } -#endif - bool missingOk = false; int nodeId = ExtractNodeIdFromGlobalPID(globalPID, missingOk); int processId = ExtractProcessIdFromGlobalPID(globalPID); @@ -102,14 +95,9 @@ CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig) } else { -#if PG_VERSION_NUM >= PG_VERSION_14 appendStringInfo(cancelQuery, "SELECT pg_terminate_backend(%d::integer, %lu::bigint)", processId, timeout); -#else - appendStringInfo(cancelQuery, "SELECT pg_terminate_backend(%d::integer)", - processId); -#endif } int connectionFlags = 0; diff --git a/src/backend/distributed/commands/cluster.c b/src/backend/distributed/commands/cluster.c index 4cffbaf51..92fcb3ec6 100644 --- a/src/backend/distributed/commands/cluster.c +++ b/src/backend/distributed/commands/cluster.c @@ -114,13 +114,6 @@ PreprocessClusterStmt(Node *node, const char *clusterCommand, static bool IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt) { -#if PG_VERSION_NUM < PG_VERSION_14 - if (clusterStmt->options & CLUOPT_VERBOSE) - { - return true; - } - return false; -#else DefElem *opt = NULL; foreach_ptr(opt, clusterStmt->params) { @@ -130,5 +123,4 @@ IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt) } } return false; -#endif } diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index 0f736df7a..ceec83324 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -214,13 +214,7 @@ DeferErrorIfCircularDependencyExists(const ObjectAddress *objectAddress) dependency->objectId == objectAddress->objectId && dependency->objectSubId == objectAddress->objectSubId) { - char 
*objectDescription = NULL; - - #if PG_VERSION_NUM >= PG_VERSION_14 - objectDescription = getObjectDescription(objectAddress, false); - #else - objectDescription = getObjectDescription(objectAddress); - #endif + char *objectDescription = getObjectDescription(objectAddress, false); StringInfo detailInfo = makeStringInfo(); appendStringInfo(detailInfo, "\"%s\" circularly depends itself, resolve " @@ -529,9 +523,9 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) */ Assert(false); ereport(ERROR, (errmsg("unsupported object %s for distribution by citus", - getObjectTypeDescription_compat(dependency, + getObjectTypeDescription(dependency, - /* missingOk: */ false)), + /* missingOk: */ false)), errdetail( "citus tries to recreate an unsupported object on its workers"), errhint("please report a bug as this should not be happening"))); diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index b417e416e..3442b07f2 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -1531,7 +1531,7 @@ GetDistributeObjectOps(Node *node) case T_AlterTableStmt: { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - switch (AlterTableStmtObjType_compat(stmt)) + switch (stmt->objtype) { case OBJECT_TYPE: { diff --git a/src/backend/distributed/commands/domain.c b/src/backend/distributed/commands/domain.c index f14157278..392cbd6e2 100644 --- a/src/backend/distributed/commands/domain.c +++ b/src/backend/distributed/commands/domain.c @@ -206,11 +206,7 @@ MakeCollateClauseFromOid(Oid collationOid) List *objName = NIL; List *objArgs = NIL; - #if PG_VERSION_NUM >= PG_VERSION_14 getObjectIdentityParts(&collateAddress, &objName, &objArgs, false); - #else - getObjectIdentityParts(&collateAddress, &objName, &objArgs); - #endif char *name = NULL; foreach_ptr(name, objName) diff --git a/src/backend/distributed/commands/function.c 
b/src/backend/distributed/commands/function.c index c992bc4fb..9f579f5dc 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -1641,7 +1641,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString, * workers */ const char *functionName = - getObjectIdentity_compat(address, /* missingOk: */ false); + getObjectIdentity(address, /* missingOk: */ false); ereport(ERROR, (errmsg("distrtibuted functions are not allowed to depend on an " "extension"), errdetail("Function \"%s\" is already distributed. Functions from " @@ -1811,8 +1811,8 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address) List *newProcName = list_make2(namespace, makeString(newName)); /* don't need to rename if the input arguments don't match */ - FuncCandidateList clist = FuncnameGetCandidates_compat(newProcName, numargs, NIL, - false, false, false, true); + FuncCandidateList clist = FuncnameGetCandidates(newProcName, numargs, NIL, + false, false, false, true); for (; clist; clist = clist->next) { if (memcmp(clist->args, argtypes, sizeof(Oid) * numargs) == 0) diff --git a/src/backend/distributed/commands/local_multi_copy.c b/src/backend/distributed/commands/local_multi_copy.c index 5cf01baf4..7dbf0ae36 100644 --- a/src/backend/distributed/commands/local_multi_copy.c +++ b/src/backend/distributed/commands/local_multi_copy.c @@ -216,10 +216,10 @@ DoLocalCopy(StringInfo buffer, Oid relationId, int64 shardId, CopyStmt *copyStat ParseState *pState = make_parsestate(NULL); (void) addRangeTableEntryForRelation(pState, shard, AccessShareLock, NULL, false, false); - CopyFromState cstate = BeginCopyFrom_compat(pState, shard, NULL, NULL, false, - ReadFromLocalBufferCallback, - copyStatement->attlist, - copyStatement->options); + CopyFromState cstate = BeginCopyFrom(pState, shard, NULL, NULL, false, + ReadFromLocalBufferCallback, + copyStatement->attlist, + copyStatement->options); CopyFrom(cstate); EndCopyFrom(cstate); diff 
--git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index f8e6378d4..5d7c279a6 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -258,9 +258,6 @@ static CopyCoercionData * ColumnCoercionPaths(TupleDesc destTupleDescriptor, Oid *finalColumnTypeArray); static FmgrInfo * TypeOutputFunctions(uint32 columnCount, Oid *typeIdArray, bool binaryFormat); -#if PG_VERSION_NUM < PG_VERSION_14 -static List * CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist); -#endif static bool CopyStatementHasFormat(CopyStmt *copyStatement, char *formatName); static void CitusCopyFrom(CopyStmt *copyStatement, QueryCompletion *completionTag); static void EnsureCopyCanRunOnRelation(Oid relationId); @@ -609,14 +606,14 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag) } /* initialize copy state to read from COPY data source */ - CopyFromState copyState = BeginCopyFrom_compat(NULL, - copiedDistributedRelation, - NULL, - copyStatement->filename, - copyStatement->is_program, - NULL, - copyStatement->attlist, - copyStatement->options); + CopyFromState copyState = BeginCopyFrom(NULL, + copiedDistributedRelation, + NULL, + copyStatement->filename, + copyStatement->is_program, + NULL, + copyStatement->attlist, + copyStatement->options); /* set up callback to identify error line number */ errorCallback.callback = CopyFromErrorCallback; @@ -648,9 +645,7 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag) ++processedRowCount; -#if PG_VERSION_NUM >= PG_VERSION_14 pgstat_progress_update_param(PROGRESS_COPY_TUPLES_PROCESSED, processedRowCount); -#endif } EndCopyFrom(copyState); @@ -890,28 +885,8 @@ CanUseBinaryCopyFormatForType(Oid typeId) HeapTuple typeTup = typeidType(typeId); Form_pg_type type = (Form_pg_type) GETSTRUCT(typeTup); Oid elementType = type->typelem; -#if PG_VERSION_NUM < PG_VERSION_14 - char typeCategory = 
type->typcategory; -#endif ReleaseSysCache(typeTup); -#if PG_VERSION_NUM < PG_VERSION_14 - - /* - * In PG versions before PG14 the array_recv function would error out more - * than necessary. - * - * It errors out when the element type its oids don't match with the oid in - * the received data. This happens pretty much always for non built in - * types, because their oids differ between postgres intallations. So we - * skip binary encoding when the element type is a non built in type. - */ - if (typeCategory == TYPCATEGORY_ARRAY && elementType >= FirstNormalObjectId) - { - return false; - } -#endif - /* * Any type that is a wrapper around an element type (e.g. arrays and * ranges) require the element type to also has support for binary @@ -1682,20 +1657,6 @@ AppendCopyBinaryFooters(CopyOutState footerOutputState) static void SendCopyBegin(CopyOutState cstate) { -#if PG_VERSION_NUM < PG_VERSION_14 - if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3) { - /* old way */ - if (cstate->binary) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY BINARY is not supported to stdout or from stdin"))); - pq_putemptymessage('H'); - /* grottiness needed for old COPY OUT protocol */ - pq_startcopyout(); - cstate->copy_dest = COPY_OLD_FE; - return; - } -#endif StringInfoData buf; int natts = list_length(cstate->attnumlist); int16 format = (cstate->binary ? 
1 : 0); @@ -1715,16 +1676,6 @@ SendCopyBegin(CopyOutState cstate) static void SendCopyEnd(CopyOutState cstate) { -#if PG_VERSION_NUM < PG_VERSION_14 - if (cstate->copy_dest != COPY_NEW_FE) - { - CopySendData(cstate, "\\.", 2); - /* Need to flush out the trailer (this also appends a newline) */ - CopySendEndOfRow(cstate, true); - pq_endcopyout(false); - return; - } -#endif /* Shouldn't have any unsent data */ Assert(cstate->fe_msgbuf->len == 0); /* Send Copy Done message */ @@ -1782,21 +1733,6 @@ CopySendEndOfRow(CopyOutState cstate, bool includeEndOfLine) switch (cstate->copy_dest) { -#if PG_VERSION_NUM < PG_VERSION_14 - case COPY_OLD_FE: - /* The FE/BE protocol uses \n as newline for all platforms */ - if (!cstate->binary && includeEndOfLine) - CopySendChar(cstate, '\n'); - - if (pq_putbytes(fe_msgbuf->data, fe_msgbuf->len)) - { - /* no hope of recovering connection sync, so FATAL */ - ereport(FATAL, - (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("connection lost during COPY to stdout"))); - } - break; -#endif case COPY_FRONTEND: /* The FE/BE protocol uses \n as newline for all platforms */ if (!cstate->binary && includeEndOfLine) @@ -3256,92 +3192,6 @@ CreateRangeTable(Relation rel, AclMode requiredAccess) } -#if PG_VERSION_NUM < PG_VERSION_14 - -/* Helper for CheckCopyPermissions(), copied from postgres */ -static List * -CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) -{ - /* *INDENT-OFF* */ - List *attnums = NIL; - - if (attnamelist == NIL) - { - /* Generate default column list */ - int attr_count = tupDesc->natts; - int i; - - for (i = 0; i < attr_count; i++) - { - if (TupleDescAttr(tupDesc, i)->attisdropped) - continue; - if (TupleDescAttr(tupDesc, i)->attgenerated) - continue; - attnums = lappend_int(attnums, i + 1); - } - } - else - { - /* Validate the user-supplied list and extract attnums */ - ListCell *l; - - foreach(l, attnamelist) - { - char *name = strVal(lfirst(l)); - int attnum; - int i; - - /* Lookup column name */ - attnum = 
InvalidAttrNumber; - for (i = 0; i < tupDesc->natts; i++) - { - Form_pg_attribute att = TupleDescAttr(tupDesc, i); - - if (att->attisdropped) - continue; - if (namestrcmp(&(att->attname), name) == 0) - { - if (att->attgenerated) - ereport(ERROR, - (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("column \"%s\" is a generated column", - name), - errdetail("Generated columns cannot be used in COPY."))); - attnum = att->attnum; - break; - } - } - if (attnum == InvalidAttrNumber) - { - if (rel != NULL) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - name, RelationGetRelationName(rel)))); - else - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" does not exist", - name))); - } - /* Check for duplicates */ - if (list_member_int(attnums, attnum)) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" specified more than once", - name))); - attnums = lappend_int(attnums, attnum); - } - } - - return attnums; - /* *INDENT-ON* */ -} - - -#endif - - /* * CreateConnectionStateHash constructs a hash table which maps from socket * number to CopyConnectionState, passing the provided MemoryContext to diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 9289dcd58..9ff586c8c 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -668,7 +668,7 @@ PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false, false); @@ -701,7 +701,7 @@ List * AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterTableStmt *stmt = 
castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); RangeVar *sequence = stmt->relation; Oid seqOid = RangeVarGetRelid(sequence, NoLock, missing_ok); @@ -721,7 +721,7 @@ List * PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false, true); @@ -755,7 +755,7 @@ PreprocessAlterSequencePersistenceStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false, false); @@ -788,7 +788,7 @@ List * AlterSequencePersistenceStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); RangeVar *sequence = stmt->relation; Oid seqOid = RangeVarGetRelid(sequence, NoLock, missing_ok); @@ -811,7 +811,7 @@ PreprocessSequenceAlterTableStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); ListCell *cmdCell = NULL; foreach(cmdCell, stmt->cmds) diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 4ea28c71d..174c34946 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -1135,7 +1135,7 @@ 
PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, if (relKind == RELKIND_SEQUENCE) { AlterTableStmt *stmtCopy = copyObject(alterTableStatement); - AlterTableStmtObjType_compat(stmtCopy) = OBJECT_SEQUENCE; + stmtCopy->objtype = OBJECT_SEQUENCE; #if (PG_VERSION_NUM >= PG_VERSION_15) /* @@ -1165,7 +1165,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, * passes through an AlterTableStmt */ AlterTableStmt *stmtCopy = copyObject(alterTableStatement); - AlterTableStmtObjType_compat(stmtCopy) = OBJECT_VIEW; + stmtCopy->objtype = OBJECT_VIEW; return PreprocessAlterViewStmt((Node *) stmtCopy, alterTableCommand, processUtilityContext); } @@ -2521,13 +2521,13 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) char relKind = get_rel_relkind(relationId); if (relKind == RELKIND_SEQUENCE) { - AlterTableStmtObjType_compat(alterTableStatement) = OBJECT_SEQUENCE; + alterTableStatement->objtype = OBJECT_SEQUENCE; PostprocessAlterSequenceOwnerStmt((Node *) alterTableStatement, NULL); return; } else if (relKind == RELKIND_VIEW) { - AlterTableStmtObjType_compat(alterTableStatement) = OBJECT_VIEW; + alterTableStatement->objtype = OBJECT_VIEW; PostprocessAlterViewStmt((Node *) alterTableStatement, NULL); return; } @@ -3517,7 +3517,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) break; } -#if PG_VERSION_NUM >= PG_VERSION_14 case AT_DetachPartitionFinalize: { ereport(ERROR, (errmsg("ALTER TABLE .. DETACH PARTITION .. 
FINALIZE " @@ -3525,7 +3524,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) break; } -#endif case AT_DetachPartition: { /* we only allow partitioning commands if they are only subcommand */ @@ -3537,7 +3535,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) errhint("You can issue each subcommand " "separately."))); } - #if PG_VERSION_NUM >= PG_VERSION_14 + PartitionCmd *partitionCommand = (PartitionCmd *) command->def; if (partitionCommand->concurrent) @@ -3546,7 +3544,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) "CONCURRENTLY commands are currently " "unsupported."))); } - #endif break; } @@ -3589,20 +3586,18 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) case AT_NoForceRowSecurity: case AT_ValidateConstraint: case AT_DropConstraint: /* we do the check for invalidation in AlterTableDropsForeignKey */ -#if PG_VERSION_NUM >= PG_VERSION_14 case AT_SetCompression: -#endif - { - /* - * We will not perform any special check for: - * ALTER TABLE .. SET ACCESS METHOD .. - * ALTER TABLE .. ALTER COLUMN .. SET NOT NULL - * ALTER TABLE .. REPLICA IDENTITY .. - * ALTER TABLE .. VALIDATE CONSTRAINT .. - * ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION .. - */ - break; - } + { + /* + * We will not perform any special check for: + * ALTER TABLE .. SET ACCESS METHOD .. + * ALTER TABLE .. ALTER COLUMN .. SET NOT NULL + * ALTER TABLE .. REPLICA IDENTITY .. + * ALTER TABLE .. VALIDATE CONSTRAINT .. + * ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION .. + */ + break; + } case AT_SetRelOptions: /* SET (...) */ case AT_ResetRelOptions: /* RESET (...) 
*/ diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index 3e641fad0..24ca91aeb 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -350,7 +350,7 @@ List * AlterTypeStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE); + Assert(stmt->objtype == OBJECT_TYPE); TypeName *typeName = MakeTypeNameFromRangeVar(stmt->relation); Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); @@ -549,7 +549,7 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress) const char *username = GetUserNameFromId(GetTypeOwner(typeAddress->objectId), false); initStringInfo(&buf); appendStringInfo(&buf, ALTER_TYPE_OWNER_COMMAND, - getObjectIdentity_compat(typeAddress, false), + getObjectIdentity(typeAddress, false), quote_identifier(username)); ddlCommands = lappend(ddlCommands, buf.data); diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 7cc997fa1..888b3dfed 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -33,9 +33,6 @@ #include "access/attnum.h" #include "access/heapam.h" #include "access/htup_details.h" -#if PG_VERSION_NUM < 140000 -#include "access/xact.h" -#endif #include "catalog/catalog.h" #include "catalog/dependency.h" #include "citus_version.h" @@ -60,9 +57,6 @@ #include "distributed/maintenanced.h" #include "distributed/multi_logical_replication.h" #include "distributed/multi_partitioning_utils.h" -#if PG_VERSION_NUM < 140000 -#include "distributed/metadata_cache.h" -#endif #include "distributed/metadata_sync.h" #include "distributed/metadata/distobject.h" #include "distributed/multi_executor.h" @@ -107,9 +101,7 @@ static void ProcessUtilityInternal(PlannedStmt *pstmt, struct QueryEnvironment *queryEnv, 
DestReceiver *dest, QueryCompletion *completionTag); -#if PG_VERSION_NUM >= 140000 static void set_indexsafe_procflags(void); -#endif static char * CurrentSearchPath(void); static void IncrementUtilityHookCountersIfNecessary(Node *parsetree); static void PostStandardProcessUtility(Node *parsetree); @@ -131,8 +123,8 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte plannedStmt->commandType = CMD_UTILITY; plannedStmt->utilityStmt = node; - ProcessUtility_compat(plannedStmt, queryString, false, context, params, NULL, dest, - completionTag); + ProcessUtility(plannedStmt, queryString, false, context, params, NULL, dest, + completionTag); } @@ -148,25 +140,19 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString, -#if PG_VERSION_NUM >= PG_VERSION_14 bool readOnlyTree, -#endif ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *completionTag) { - Node *parsetree; - -#if PG_VERSION_NUM >= PG_VERSION_14 if (readOnlyTree) { pstmt = copyObject(pstmt); } -#endif - parsetree = pstmt->utilityStmt; + Node *parsetree = pstmt->utilityStmt; if (IsA(parsetree, TransactionStmt)) { @@ -199,8 +185,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, * that state. Since we never need to intercept transaction statements, * skip our checks and immediately fall into standard_ProcessUtility. */ - PrevProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); return; } @@ -244,8 +230,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, * Ensure that utility commands do not behave any differently until CREATE * EXTENSION is invoked. 
*/ - PrevProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); return; } @@ -276,8 +262,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, PG_TRY(); { - PrevProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); StoredProcedureLevel -= 1; @@ -310,8 +296,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, PG_TRY(); { - PrevProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); DoBlockLevel -= 1; } @@ -649,8 +635,8 @@ ProcessUtilityInternal(PlannedStmt *pstmt, if (IsA(parsetree, AlterTableStmt)) { AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree; - if (AlterTableStmtObjType_compat(alterTableStmt) == OBJECT_TABLE || - AlterTableStmtObjType_compat(alterTableStmt) == OBJECT_FOREIGN_TABLE) + if (alterTableStmt->objtype == OBJECT_TABLE || + alterTableStmt->objtype == OBJECT_FOREIGN_TABLE) { ErrorIfAlterDropsPartitionColumn(alterTableStmt); @@ -769,8 +755,8 @@ ProcessUtilityInternal(PlannedStmt *pstmt, PreprocessAlterExtensionCitusStmtForCitusColumnar(parsetree); } - PrevProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); if (isAlterExtensionUpdateCitusStmt) { @@ -1208,38 +1194,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) */ if (ddlJob->startNewTransaction) { -#if PG_VERSION_NUM < 140000 - - /* - * Older versions of postgres doesn't have PROC_IN_SAFE_IC flag - * so we cannot use set_indexsafe_procflags in those versions. 
- * - * For this reason, we do our best to ensure not grabbing any - * snapshots later in the executor. - */ - - /* - * If cache is not populated, system catalog lookups will cause - * the xmin of current backend to change. Then the last phase - * of CREATE INDEX CONCURRENTLY, which is in a separate backend, - * will hang waiting for our backend and result in a deadlock. - * - * We populate the cache before starting the next transaction to - * avoid this. Most of the metadata has already been resolved in - * planning phase, we only need to lookup metadata needed for - * connection establishment. - */ - (void) CurrentDatabaseName(); - - /* - * ConnParams (AuthInfo and PoolInfo) gets a snapshot, which - * will blocks the remote connections to localhost. Hence we warm up - * the cache here so that after we start a new transaction, the entries - * will already be in the hash table, hence we won't be holding any snapshots. - */ - WarmUpConnParamsHash(); -#endif - /* * Since it is not certain whether the code-path that we followed * until reaching here caused grabbing any snapshots or not, we @@ -1258,8 +1212,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) CommitTransactionCommand(); StartTransactionCommand(); -#if PG_VERSION_NUM >= 140000 - /* * Tell other backends to ignore us, even if we grab any * snapshots via adaptive executor. @@ -1274,7 +1226,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) * given above. */ Assert(localExecutionSupported == false); -#endif } MemoryContext savedContext = CurrentMemoryContext; @@ -1340,8 +1291,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) } -#if PG_VERSION_NUM >= 140000 - /* * set_indexsafe_procflags sets PROC_IN_SAFE_IC flag in MyProc->statusFlags. * @@ -1364,9 +1313,6 @@ set_indexsafe_procflags(void) } -#endif - - /* * CurrentSearchPath is a C interface for calling current_schemas(bool) that * PostgreSQL exports. 
diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index 274aebb8f..6bc76b7b8 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -359,12 +359,12 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams) { appendStringInfoString(vacuumPrefix, "SKIP_LOCKED,"); } - #if PG_VERSION_NUM >= PG_VERSION_14 + if (vacuumFlags & VACOPT_PROCESS_TOAST) { appendStringInfoString(vacuumPrefix, "PROCESS_TOAST,"); } - #endif + if (vacuumParams.truncate != VACOPTVALUE_UNSPECIFIED) { appendStringInfoString(vacuumPrefix, @@ -389,13 +389,11 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams) break; } - #if PG_VERSION_NUM >= PG_VERSION_14 case VACOPTVALUE_AUTO: { appendStringInfoString(vacuumPrefix, "INDEX_CLEANUP auto,"); break; } - #endif default: { @@ -501,9 +499,7 @@ VacuumStmtParams(VacuumStmt *vacstmt) bool freeze = false; bool full = false; bool disable_page_skipping = false; - #if PG_VERSION_NUM >= PG_VERSION_14 bool process_toast = false; - #endif /* Set default value */ params.index_cleanup = VACOPTVALUE_UNSPECIFIED; @@ -547,16 +543,12 @@ VacuumStmtParams(VacuumStmt *vacstmt) { disable_page_skipping = defGetBoolean(opt); } - #if PG_VERSION_NUM >= PG_VERSION_14 else if (strcmp(opt->defname, "process_toast") == 0) { process_toast = defGetBoolean(opt); } - #endif else if (strcmp(opt->defname, "index_cleanup") == 0) { - #if PG_VERSION_NUM >= PG_VERSION_14 - /* Interpret no string as the default, which is 'auto' */ if (!opt->arg) { @@ -577,10 +569,6 @@ VacuumStmtParams(VacuumStmt *vacstmt) VACOPTVALUE_DISABLED; } } - #else - params.index_cleanup = defGetBoolean(opt) ? VACOPTVALUE_ENABLED : - VACOPTVALUE_DISABLED; - #endif } else if (strcmp(opt->defname, "truncate") == 0) { @@ -625,9 +613,7 @@ VacuumStmtParams(VacuumStmt *vacstmt) (analyze ? VACOPT_ANALYZE : 0) | (freeze ? VACOPT_FREEZE : 0) | (full ? VACOPT_FULL : 0) | - #if PG_VERSION_NUM >= PG_VERSION_14 (process_toast ? 
VACOPT_PROCESS_TOAST : 0) | - #endif (disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0); return params; } diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c index 8219a2907..02d6815d9 100644 --- a/src/backend/distributed/commands/view.c +++ b/src/backend/distributed/commands/view.c @@ -598,7 +598,7 @@ List * PostprocessAlterViewStmt(Node *node, const char *queryString) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_VIEW); + Assert(stmt->objtype == OBJECT_VIEW); List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true, true); diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index e4aca3ee7..46e757bfe 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -1314,33 +1314,6 @@ StartConnectionEstablishment(MultiConnection *connection, ConnectionHashKey *key } -#if PG_VERSION_NUM < 140000 - -/* - * WarmUpConnParamsHash warms up the ConnParamsHash by loading all the - * conn params for active primary nodes. - */ -void -WarmUpConnParamsHash(void) -{ - List *workerNodeList = ActivePrimaryNodeList(AccessShareLock); - WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) - { - ConnectionHashKey key; - strlcpy(key.hostname, workerNode->workerName, MAX_NODE_LENGTH); - key.port = workerNode->workerPort; - strlcpy(key.database, CurrentDatabaseName(), NAMEDATALEN); - strlcpy(key.user, CurrentUserName(), NAMEDATALEN); - key.replicationConnParam = false; - FindOrCreateConnParamsEntry(&key); - } -} - - -#endif - - /* * FindOrCreateConnParamsEntry searches ConnParamsHash for the given key, * if it is not found, it is created. 
diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index 05e483766..6b865e061 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -22,9 +22,7 @@ #include "access/skey.h" #include "access/stratnum.h" #include "access/sysattr.h" -#if PG_VERSION_NUM >= PG_VERSION_14 #include "access/toast_compression.h" -#endif #include "access/tupdesc.h" #include "catalog/dependency.h" #include "catalog/indexing.h" @@ -386,13 +384,11 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults atttypmod); appendStringInfoString(&buffer, attributeTypeName); -#if PG_VERSION_NUM >= PG_VERSION_14 if (CompressionMethodIsValid(attributeForm->attcompression)) { appendStringInfo(&buffer, " COMPRESSION %s", GetCompressionMethodName(attributeForm->attcompression)); } -#endif if (attributeForm->attidentity && includeIdentityDefaults) { @@ -939,17 +935,6 @@ deparse_shard_reindex_statement(ReindexStmt *origStmt, Oid distrelid, int64 shar bool IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param) { -#if PG_VERSION_NUM < PG_VERSION_14 - if (strcmp(param, "concurrently") == 0) - { - return reindexStmt->concurrent; - } - else if (strcmp(param, "verbose") == 0) - { - return reindexStmt->options & REINDEXOPT_VERBOSE; - } - return false; -#else DefElem *opt = NULL; foreach_ptr(opt, reindexStmt->params) { @@ -959,7 +944,6 @@ IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param) } } return false; -#endif } @@ -974,7 +958,7 @@ AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer) { appendStringInfoString(temp, "VERBOSE"); } -#if PG_VERSION_NUM >= PG_VERSION_14 + char *tableSpaceName = NULL; DefElem *opt = NULL; foreach_ptr(opt, reindexStmt->params) @@ -997,7 +981,6 @@ AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer) appendStringInfo(temp, "TABLESPACE %s", tableSpaceName); } } -#endif if (temp->len > 0) { @@ -1627,9 
+1610,7 @@ RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier) spec->rolename; } - #if PG_VERSION_NUM >= PG_VERSION_14 case ROLESPEC_CURRENT_ROLE: - #endif case ROLESPEC_CURRENT_USER: { return withQuoteIdentifier ? diff --git a/src/backend/distributed/deparser/deparse_sequence_stmts.c b/src/backend/distributed/deparser/deparse_sequence_stmts.c index 80c4e2dd4..de2afdeec 100644 --- a/src/backend/distributed/deparser/deparse_sequence_stmts.c +++ b/src/backend/distributed/deparser/deparse_sequence_stmts.c @@ -193,7 +193,7 @@ DeparseAlterSequenceOwnerStmt(Node *node) StringInfoData str = { 0 }; initStringInfo(&str); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); AppendAlterSequenceOwnerStmt(&str, stmt); @@ -208,7 +208,7 @@ DeparseAlterSequenceOwnerStmt(Node *node) static void AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt) { - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); RangeVar *seq = stmt->relation; char *qualifiedSequenceName = quote_qualified_identifier(seq->schemaname, seq->relname); @@ -274,7 +274,7 @@ DeparseAlterSequencePersistenceStmt(Node *node) StringInfoData str = { 0 }; initStringInfo(&str); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); AppendAlterSequencePersistenceStmt(&str, stmt); @@ -289,7 +289,7 @@ DeparseAlterSequencePersistenceStmt(Node *node) static void AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt) { - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); RangeVar *seq = stmt->relation; char *qualifiedSequenceName = quote_qualified_identifier(seq->schemaname, diff --git a/src/backend/distributed/deparser/deparse_statistics_stmts.c b/src/backend/distributed/deparser/deparse_statistics_stmts.c index 923af645e..4a165ec72 100644 --- 
a/src/backend/distributed/deparser/deparse_statistics_stmts.c +++ b/src/backend/distributed/deparser/deparse_statistics_stmts.c @@ -229,7 +229,6 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt) } -#if PG_VERSION_NUM >= PG_VERSION_14 static void AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt) { @@ -257,36 +256,6 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt) } -#else -static void -AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt) -{ - ColumnRef *column = NULL; - - foreach_ptr(column, stmt->exprs) - { - if (!IsA(column, ColumnRef) || list_length(column->fields) != 1) - { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg( - "only simple column references are allowed in CREATE STATISTICS"))); - } - - char *columnName = NameListToQuotedString(column->fields); - - appendStringInfoString(buf, columnName); - - if (column != llast(stmt->exprs)) - { - appendStringInfoString(buf, ", "); - } - } -} - - -#endif - static void AppendTableName(StringInfo buf, CreateStatsStmt *stmt) { diff --git a/src/backend/distributed/deparser/deparse_table_stmts.c b/src/backend/distributed/deparser/deparse_table_stmts.c index 6e0dd3f06..1d9ee1739 100644 --- a/src/backend/distributed/deparser/deparse_table_stmts.c +++ b/src/backend/distributed/deparser/deparse_table_stmts.c @@ -77,7 +77,7 @@ DeparseAlterTableStmt(Node *node) StringInfoData str = { 0 }; initStringInfo(&str); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TABLE); + Assert(stmt->objtype == OBJECT_TABLE); AppendAlterTableStmt(&str, stmt); return str.data; @@ -96,7 +96,7 @@ AppendAlterTableStmt(StringInfo buf, AlterTableStmt *stmt) stmt->relation->relname); ListCell *cmdCell = NULL; - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TABLE); + Assert(stmt->objtype == OBJECT_TABLE); appendStringInfo(buf, "ALTER TABLE %s", identifier); foreach(cmdCell, stmt->cmds) diff --git a/src/backend/distributed/deparser/deparse_type_stmts.c 
b/src/backend/distributed/deparser/deparse_type_stmts.c index e12d96ad9..1d70c6791 100644 --- a/src/backend/distributed/deparser/deparse_type_stmts.c +++ b/src/backend/distributed/deparser/deparse_type_stmts.c @@ -122,7 +122,7 @@ DeparseAlterTypeStmt(Node *node) StringInfoData str = { 0 }; initStringInfo(&str); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE); + Assert(stmt->objtype == OBJECT_TYPE); AppendAlterTypeStmt(&str, stmt); @@ -137,7 +137,7 @@ AppendAlterTypeStmt(StringInfo buf, AlterTableStmt *stmt) stmt->relation->relname); ListCell *cmdCell = NULL; - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE); + Assert(stmt->objtype == OBJECT_TYPE); appendStringInfo(buf, "ALTER TYPE %s", identifier); foreach(cmdCell, stmt->cmds) diff --git a/src/backend/distributed/deparser/qualify_domain.c b/src/backend/distributed/deparser/qualify_domain.c index b36a0a713..2e163dad0 100644 --- a/src/backend/distributed/deparser/qualify_domain.c +++ b/src/backend/distributed/deparser/qualify_domain.c @@ -245,11 +245,7 @@ QualifyCollate(CollateClause *collClause, bool missing_ok) List *objName = NIL; List *objArgs = NIL; - #if PG_VERSION_NUM >= PG_VERSION_14 getObjectIdentityParts(&collationAddress, &objName, &objArgs, false); - #else - getObjectIdentityParts(&collationAddress, &objName, &objArgs); - #endif collClause->collname = NIL; char *name = NULL; diff --git a/src/backend/distributed/deparser/qualify_sequence_stmt.c b/src/backend/distributed/deparser/qualify_sequence_stmt.c index cece902a6..384e0c953 100644 --- a/src/backend/distributed/deparser/qualify_sequence_stmt.c +++ b/src/backend/distributed/deparser/qualify_sequence_stmt.c @@ -34,7 +34,7 @@ void QualifyAlterSequenceOwnerStmt(Node *node) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); RangeVar *seq = stmt->relation; @@ -62,7 +62,7 @@ void QualifyAlterSequencePersistenceStmt(Node 
*node) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); + Assert(stmt->objtype == OBJECT_SEQUENCE); RangeVar *seq = stmt->relation; diff --git a/src/backend/distributed/deparser/qualify_type_stmt.c b/src/backend/distributed/deparser/qualify_type_stmt.c index 33c80f527..487e6fc97 100644 --- a/src/backend/distributed/deparser/qualify_type_stmt.c +++ b/src/backend/distributed/deparser/qualify_type_stmt.c @@ -123,7 +123,7 @@ void QualifyAlterTypeStmt(Node *node) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE); + Assert(stmt->objtype == OBJECT_TYPE); if (stmt->relation->schemaname == NULL) { diff --git a/src/backend/distributed/deparser/ruleutils_13.c b/src/backend/distributed/deparser/ruleutils_13.c deleted file mode 100644 index 31ef67f97..000000000 --- a/src/backend/distributed/deparser/ruleutils_13.c +++ /dev/null @@ -1,8131 +0,0 @@ -/*------------------------------------------------------------------------- - * - * ruleutils_13.c - * Functions to convert stored expressions/querytrees back to - * source text - * - * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/distributed/deparser/ruleutils_13.c - * - * This needs to be closely in sync with the core code. 
- *------------------------------------------------------------------------- - */ -#include "distributed/pg_version_constants.h" - -#include "pg_config.h" - -#if (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) - -#include "postgres.h" - -#include -#include -#include - -#include "access/amapi.h" -#include "access/htup_details.h" -#include "access/relation.h" -#include "access/sysattr.h" -#include "access/table.h" -#include "catalog/dependency.h" -#include "catalog/indexing.h" -#include "catalog/pg_aggregate.h" -#include "catalog/pg_am.h" -#include "catalog/pg_authid.h" -#include "catalog/pg_collation.h" -#include "catalog/pg_constraint.h" -#include "catalog/pg_depend.h" -#include "catalog/pg_extension.h" -#include "catalog/pg_foreign_data_wrapper.h" -#include "catalog/pg_language.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_operator.h" -#include "catalog/pg_partitioned_table.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_statistic_ext.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_type.h" -#include "commands/defrem.h" -#include "commands/extension.h" -#include "commands/tablespace.h" -#include "common/keywords.h" -#include "distributed/citus_nodefuncs.h" -#include "distributed/citus_ruleutils.h" -#include "executor/spi.h" -#include "foreign/foreign.h" -#include "funcapi.h" -#include "mb/pg_wchar.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/pathnodes.h" -#include "optimizer/optimizer.h" -#include "parser/parse_node.h" -#include "parser/parse_agg.h" -#include "parser/parse_func.h" -#include "parser/parse_node.h" -#include "parser/parse_oper.h" -#include "parser/parser.h" -#include "parser/parsetree.h" -#include "rewrite/rewriteHandler.h" -#include "rewrite/rewriteManip.h" -#include "rewrite/rewriteSupport.h" -#include "utils/array.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/hsearch.h" -#include "utils/lsyscache.h" -#include 
"utils/rel.h" -#include "utils/ruleutils.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" -#include "utils/typcache.h" -#include "utils/varlena.h" -#include "utils/xml.h" - - -/* ---------- - * Pretty formatting constants - * ---------- - */ - -/* Indent counts */ -#define PRETTYINDENT_STD 8 -#define PRETTYINDENT_JOIN 4 -#define PRETTYINDENT_VAR 4 - -#define PRETTYINDENT_LIMIT 40 /* wrap limit */ - -/* Pretty flags */ -#define PRETTYFLAG_PAREN 0x0001 -#define PRETTYFLAG_INDENT 0x0002 - -/* Default line length for pretty-print wrapping: 0 means wrap always */ -#define WRAP_COLUMN_DEFAULT 0 - -/* macros to test if pretty action needed */ -#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN) -#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT) - - -/* ---------- - * Local data types - * ---------- - */ - -/* Context info needed for invoking a recursive querytree display routine */ -typedef struct -{ - StringInfo buf; /* output buffer to append to */ - List *namespaces; /* List of deparse_namespace nodes */ - List *windowClause; /* Current query level's WINDOW clause */ - List *windowTList; /* targetlist for resolving WINDOW clause */ - int prettyFlags; /* enabling of pretty-print functions */ - int wrapColumn; /* max line length, or -1 for no limit */ - int indentLevel; /* current indent level for prettyprint */ - bool varprefix; /* true to print prefixes on Vars */ - Oid distrelid; /* the distributed table being modified, if valid */ - int64 shardid; /* a distributed table's shardid, if positive */ - ParseExprKind special_exprkind; /* set only for exprkinds needing special - * handling */ - Bitmapset *appendparents; /* if not null, map child Vars of these relids - * back to the parent rel */ -} deparse_context; - -/* - * Each level of query context around a subtree needs a level of Var namespace. 
- * A Var having varlevelsup=N refers to the N'th item (counting from 0) in - * the current context's namespaces list. - * - * The rangetable is the list of actual RTEs from the query tree, and the - * cte list is the list of actual CTEs. - * - * rtable_names holds the alias name to be used for each RTE (either a C - * string, or NULL for nameless RTEs such as unnamed joins). - * rtable_columns holds the column alias names to be used for each RTE. - * - * In some cases we need to make names of merged JOIN USING columns unique - * across the whole query, not only per-RTE. If so, unique_using is true - * and using_names is a list of C strings representing names already assigned - * to USING columns. - * - * When deparsing plan trees, there is always just a single item in the - * deparse_namespace list (since a plan tree never contains Vars with - * varlevelsup > 0). We store the PlanState node that is the immediate - * parent of the expression to be deparsed, as well as a list of that - * PlanState's ancestors. In addition, we store its outer and inner subplan - * state nodes, as well as their plan nodes' targetlists, and the index tlist - * if the current plan node might contain INDEX_VAR Vars. (These fields could - * be derived on-the-fly from the current PlanState, but it seems notationally - * clearer to set them up as separate fields.) 
- */ -typedef struct -{ - List *rtable; /* List of RangeTblEntry nodes */ - List *rtable_names; /* Parallel list of names for RTEs */ - List *rtable_columns; /* Parallel list of deparse_columns structs */ - List *subplans; /* List of Plan trees for SubPlans */ - List *ctes; /* List of CommonTableExpr nodes */ - AppendRelInfo **appendrels; /* Array of AppendRelInfo nodes, or NULL */ - /* Workspace for column alias assignment: */ - bool unique_using; /* Are we making USING names globally unique */ - List *using_names; /* List of assigned names for USING columns */ - /* Remaining fields are used only when deparsing a Plan tree: */ - Plan *plan; /* immediate parent of current expression */ - List *ancestors; /* ancestors of planstate */ - Plan *outer_plan; /* outer subnode, or NULL if none */ - Plan *inner_plan; /* inner subnode, or NULL if none */ - List *outer_tlist; /* referent for OUTER_VAR Vars */ - List *inner_tlist; /* referent for INNER_VAR Vars */ - List *index_tlist; /* referent for INDEX_VAR Vars */ -} deparse_namespace; - -/* Callback signature for resolve_special_varno() */ -typedef void (*rsv_callback) (Node *node, deparse_context *context, - void *callback_arg); - -/* - * Per-relation data about column alias names. - * - * Selecting aliases is unreasonably complicated because of the need to dump - * rules/views whose underlying tables may have had columns added, deleted, or - * renamed since the query was parsed. We must nonetheless print the rule/view - * in a form that can be reloaded and will produce the same results as before. - * - * For each RTE used in the query, we must assign column aliases that are - * unique within that RTE. SQL does not require this of the original query, - * but due to factors such as *-expansion we need to be able to uniquely - * reference every column in a decompiled query. As long as we qualify all - * column references, per-RTE uniqueness is sufficient for that. 
- * - * However, we can't ensure per-column name uniqueness for unnamed join RTEs, - * since they just inherit column names from their input RTEs, and we can't - * rename the columns at the join level. Most of the time this isn't an issue - * because we don't need to reference the join's output columns as such; we - * can reference the input columns instead. That approach can fail for merged - * JOIN USING columns, however, so when we have one of those in an unnamed - * join, we have to make that column's alias globally unique across the whole - * query to ensure it can be referenced unambiguously. - * - * Another problem is that a JOIN USING clause requires the columns to be - * merged to have the same aliases in both input RTEs, and that no other - * columns in those RTEs or their children conflict with the USING names. - * To handle that, we do USING-column alias assignment in a recursive - * traversal of the query's jointree. When descending through a JOIN with - * USING, we preassign the USING column names to the child columns, overriding - * other rules for column alias assignment. We also mark each RTE with a list - * of all USING column names selected for joins containing that RTE, so that - * when we assign other columns' aliases later, we can avoid conflicts. - * - * Another problem is that if a JOIN's input tables have had columns added or - * deleted since the query was parsed, we must generate a column alias list - * for the join that matches the current set of input columns --- otherwise, a - * change in the number of columns in the left input would throw off matching - * of aliases to columns of the right input. Thus, positions in the printable - * column alias list are not necessarily one-for-one with varattnos of the - * JOIN, so we need a separate new_colnames[] array for printing purposes. - */ -typedef struct -{ - /* - * colnames is an array containing column aliases to use for columns that - * existed when the query was parsed. 
Dropped columns have NULL entries. - * This array can be directly indexed by varattno to get a Var's name. - * - * Non-NULL entries are guaranteed unique within the RTE, *except* when - * this is for an unnamed JOIN RTE. In that case we merely copy up names - * from the two input RTEs. - * - * During the recursive descent in set_using_names(), forcible assignment - * of a child RTE's column name is represented by pre-setting that element - * of the child's colnames array. So at that stage, NULL entries in this - * array just mean that no name has been preassigned, not necessarily that - * the column is dropped. - */ - int num_cols; /* length of colnames[] array */ - char **colnames; /* array of C strings and NULLs */ - - /* - * new_colnames is an array containing column aliases to use for columns - * that would exist if the query was re-parsed against the current - * definitions of its base tables. This is what to print as the column - * alias list for the RTE. This array does not include dropped columns, - * but it will include columns added since original parsing. Indexes in - * it therefore have little to do with current varattno values. As above, - * entries are unique unless this is for an unnamed JOIN RTE. (In such an - * RTE, we never actually print this array, but we must compute it anyway - * for possible use in computing column names of upper joins.) The - * parallel array is_new_col marks which of these columns are new since - * original parsing. Entries with is_new_col false must match the - * non-NULL colnames entries one-for-one. 
- */ - int num_new_cols; /* length of new_colnames[] array */ - char **new_colnames; /* array of C strings */ - bool *is_new_col; /* array of bool flags */ - - /* This flag tells whether we should actually print a column alias list */ - bool printaliases; - - /* This list has all names used as USING names in joins above this RTE */ - List *parentUsing; /* names assigned to parent merged columns */ - - /* - * If this struct is for a JOIN RTE, we fill these fields during the - * set_using_names() pass to describe its relationship to its child RTEs. - * - * leftattnos and rightattnos are arrays with one entry per existing - * output column of the join (hence, indexable by join varattno). For a - * simple reference to a column of the left child, leftattnos[i] is the - * child RTE's attno and rightattnos[i] is zero; and conversely for a - * column of the right child. But for merged columns produced by JOIN - * USING/NATURAL JOIN, both leftattnos[i] and rightattnos[i] are nonzero. - * Also, if the column has been dropped, both are zero. - * - * If it's a JOIN USING, usingNames holds the alias names selected for the - * merged columns (these might be different from the original USING list, - * if we had to modify names to achieve uniqueness). 
- */ - int leftrti; /* rangetable index of left child */ - int rightrti; /* rangetable index of right child */ - int *leftattnos; /* left-child varattnos of join cols, or 0 */ - int *rightattnos; /* right-child varattnos of join cols, or 0 */ - List *usingNames; /* names assigned to merged columns */ -} deparse_columns; - -/* This macro is analogous to rt_fetch(), but for deparse_columns structs */ -#define deparse_columns_fetch(rangetable_index, dpns) \ - ((deparse_columns *) list_nth((dpns)->rtable_columns, (rangetable_index)-1)) - -/* - * Entry in set_rtable_names' hash table - */ -typedef struct -{ - char name[NAMEDATALEN]; /* Hash key --- must be first */ - int counter; /* Largest addition used so far for name */ -} NameHashEntry; - - -/* ---------- - * Local functions - * - * Most of these functions used to use fixed-size buffers to build their - * results. Now, they take an (already initialized) StringInfo object - * as a parameter, and append their text output to its contents. - * ---------- - */ -static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, - Bitmapset *rels_used); -static void set_deparse_for_query(deparse_namespace *dpns, Query *query, - List *parent_namespaces); -static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode); -static void set_using_names(deparse_namespace *dpns, Node *jtnode, - List *parentUsing); -static void set_relation_column_names(deparse_namespace *dpns, - RangeTblEntry *rte, - deparse_columns *colinfo); -static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo); -static bool colname_is_unique(const char *colname, deparse_namespace *dpns, - deparse_columns *colinfo); -static char *make_colname_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo); -static void expand_colnames_array_to(deparse_columns *colinfo, int n); -static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, - deparse_columns *colinfo); 
-static char *get_rtable_name(int rtindex, deparse_context *context); -static void set_deparse_plan(deparse_namespace *dpns, Plan *plan); -static void push_child_plan(deparse_namespace *dpns, Plan *plan, - deparse_namespace *save_dpns); -static void pop_child_plan(deparse_namespace *dpns, - deparse_namespace *save_dpns); -static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, - deparse_namespace *save_dpns); -static void pop_ancestor_plan(deparse_namespace *dpns, - deparse_namespace *save_dpns); -static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, - TupleDesc resultDesc, - int prettyFlags, int wrapColumn, int startIndent); -static void get_query_def_extended(Query *query, StringInfo buf, - List *parentnamespace, Oid distrelid, int64 shardid, - TupleDesc resultDesc, int prettyFlags, int wrapColumn, - int startIndent); -static void get_values_def(List *values_lists, deparse_context *context); -static void get_with_clause(Query *query, deparse_context *context); -static void get_select_query_def(Query *query, deparse_context *context, - TupleDesc resultDesc); -static void get_insert_query_def(Query *query, deparse_context *context); -static void get_update_query_def(Query *query, deparse_context *context); -static void get_update_query_targetlist_def(Query *query, List *targetList, - deparse_context *context, - RangeTblEntry *rte); -static void get_delete_query_def(Query *query, deparse_context *context); -static void get_utility_query_def(Query *query, deparse_context *context); -static void get_basic_select_query(Query *query, deparse_context *context, - TupleDesc resultDesc); -static void get_target_list(List *targetList, deparse_context *context, - TupleDesc resultDesc); -static void get_setop_query(Node *setOp, Query *query, - deparse_context *context, - TupleDesc resultDesc); -static Node *get_rule_sortgroupclause(Index ref, List *tlist, - bool force_colno, - deparse_context *context); -static void 
get_rule_groupingset(GroupingSet *gset, List *targetlist, - bool omit_parens, deparse_context *context); -static void get_rule_orderby(List *orderList, List *targetList, - bool force_colno, deparse_context *context); -static void get_rule_windowclause(Query *query, deparse_context *context); -static void get_rule_windowspec(WindowClause *wc, List *targetList, - deparse_context *context); -static char *get_variable(Var *var, int levelsup, bool istoplevel, - deparse_context *context); -static void get_special_variable(Node *node, deparse_context *context, - void *callback_arg); -static void resolve_special_varno(Node *node, deparse_context *context, - rsv_callback callback, void *callback_arg); -static Node *find_param_referent(Param *param, deparse_context *context, - deparse_namespace **dpns_p, ListCell **ancestor_cell_p); -static void get_parameter(Param *param, deparse_context *context); -static const char *get_simple_binary_op_name(OpExpr *expr); -static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags); -static void appendContextKeyword(deparse_context *context, const char *str, - int indentBefore, int indentAfter, int indentPlus); -static void removeStringInfoSpaces(StringInfo str); -static void get_rule_expr(Node *node, deparse_context *context, - bool showimplicit); -static void get_rule_expr_toplevel(Node *node, deparse_context *context, - bool showimplicit); -static void get_rule_expr_funccall(Node *node, deparse_context *context, - bool showimplicit); -static bool looks_like_function(Node *node); -static void get_oper_expr(OpExpr *expr, deparse_context *context); -static void get_func_expr(FuncExpr *expr, deparse_context *context, - bool showimplicit); -static void get_agg_expr(Aggref *aggref, deparse_context *context, - Aggref *original_aggref); -static void get_agg_combine_expr(Node *node, deparse_context *context, - void *callback_arg); -static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context); -static void 
get_coercion_expr(Node *arg, deparse_context *context, - Oid resulttype, int32 resulttypmod, - Node *parentNode); -static void get_const_expr(Const *constval, deparse_context *context, - int showtype); -static void get_const_collation(Const *constval, deparse_context *context); -static void simple_quote_literal(StringInfo buf, const char *val); -static void get_sublink_expr(SubLink *sublink, deparse_context *context); -static void get_tablefunc(TableFunc *tf, deparse_context *context, - bool showimplicit); -static void get_from_clause(Query *query, const char *prefix, - deparse_context *context); -static void get_from_clause_item(Node *jtnode, Query *query, - deparse_context *context); -static void get_column_alias_list(deparse_columns *colinfo, - deparse_context *context); -static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, - deparse_columns *colinfo, - deparse_context *context); -static void get_tablesample_def(TableSampleClause *tablesample, - deparse_context *context); -static void get_opclass_name(Oid opclass, Oid actual_datatype, - StringInfo buf); -static Node *processIndirection(Node *node, deparse_context *context); -static void printSubscripts(SubscriptingRef *aref, deparse_context *context); -static char *get_relation_name(Oid relid); -static char *generate_relation_or_shard_name(Oid relid, Oid distrelid, - int64 shardid, List *namespaces); -static char *generate_rte_shard_name(RangeTblEntry *rangeTableEntry); -static char *generate_fragment_name(char *schemaName, char *tableName); -static char *generate_function_name(Oid funcid, int nargs, - List *argnames, Oid *argtypes, - bool has_variadic, bool *use_variadic_p, - ParseExprKind special_exprkind); - -#define only_marker(rte) ((rte)->inh ? "" : "ONLY ") - - - -/* - * pg_get_query_def parses back one query tree, and outputs the resulting query - * string into given buffer. 
- */ -void -pg_get_query_def(Query *query, StringInfo buffer) -{ - get_query_def(query, buffer, NIL, NULL, 0, WRAP_COLUMN_DEFAULT, 0); -} - -/* - * get_merged_argument_list merges both IN and OUT arguments lists into one and also - * eliminates the INOUT duplicates(present in both the lists). - */ -bool -get_merged_argument_list(CallStmt *stmt, List **mergedNamedArgList, - Oid **mergedNamedArgTypes, - List **mergedArgumentList, - int *totalArguments) -{ - /* No OUT argument support in Postgres 13 */ - return false; -} - -/* - * pg_get_rule_expr deparses an expression and returns the result as a string. - */ -char * -pg_get_rule_expr(Node *expression) -{ - bool showImplicitCasts = true; - deparse_context context; - OverrideSearchPath *overridePath = NULL; - StringInfo buffer = makeStringInfo(); - - /* - * Set search_path to NIL so that all objects outside of pg_catalog will be - * schema-prefixed. pg_catalog will be added automatically when we call - * PushOverrideSearchPath(), since we set addCatalog to true; - */ - overridePath = GetOverrideSearchPath(CurrentMemoryContext); - overridePath->schemas = NIL; - overridePath->addCatalog = true; - PushOverrideSearchPath(overridePath); - - context.buf = buffer; - context.namespaces = NIL; - context.windowClause = NIL; - context.windowTList = NIL; - context.varprefix = false; - context.prettyFlags = 0; - context.wrapColumn = WRAP_COLUMN_DEFAULT; - context.indentLevel = 0; - context.special_exprkind = EXPR_KIND_NONE; - context.distrelid = InvalidOid; - context.shardid = INVALID_SHARD_ID; - - get_rule_expr(expression, &context, showImplicitCasts); - - /* revert back to original search_path */ - PopOverrideSearchPath(); - - return buffer->data; -} - - -/* - * set_rtable_names: select RTE aliases to be used in printing a query - * - * We fill in dpns->rtable_names with a list of names that is one-for-one with - * the already-filled dpns->rtable list. 
Each RTE name is unique among those - * in the new namespace plus any ancestor namespaces listed in - * parent_namespaces. - * - * If rels_used isn't NULL, only RTE indexes listed in it are given aliases. - * - * Note that this function is only concerned with relation names, not column - * names. - */ -static void -set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, - Bitmapset *rels_used) -{ - HASHCTL hash_ctl; - HTAB *names_hash; - NameHashEntry *hentry; - bool found; - int rtindex; - ListCell *lc; - - dpns->rtable_names = NIL; - /* nothing more to do if empty rtable */ - if (dpns->rtable == NIL) - return; - - /* - * We use a hash table to hold known names, so that this process is O(N) - * not O(N^2) for N names. - */ - MemSet(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = NAMEDATALEN; - hash_ctl.entrysize = sizeof(NameHashEntry); - hash_ctl.hcxt = CurrentMemoryContext; - names_hash = hash_create("set_rtable_names names", - list_length(dpns->rtable), - &hash_ctl, - HASH_ELEM | HASH_CONTEXT); - /* Preload the hash table with names appearing in parent_namespaces */ - foreach(lc, parent_namespaces) - { - deparse_namespace *olddpns = (deparse_namespace *) lfirst(lc); - ListCell *lc2; - - foreach(lc2, olddpns->rtable_names) - { - char *oldname = (char *) lfirst(lc2); - - if (oldname == NULL) - continue; - hentry = (NameHashEntry *) hash_search(names_hash, - oldname, - HASH_ENTER, - &found); - /* we do not complain about duplicate names in parent namespaces */ - hentry->counter = 0; - } - } - - /* Now we can scan the rtable */ - rtindex = 1; - foreach(lc, dpns->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - char *refname; - - /* Just in case this takes an unreasonable amount of time ... 
*/ - CHECK_FOR_INTERRUPTS(); - - if (rels_used && !bms_is_member(rtindex, rels_used)) - { - /* Ignore unreferenced RTE */ - refname = NULL; - } - else if (rte->alias) - { - /* If RTE has a user-defined alias, prefer that */ - refname = rte->alias->aliasname; - } - else if (rte->rtekind == RTE_RELATION) - { - /* Use the current actual name of the relation */ - refname = get_rel_name(rte->relid); - } - else if (rte->rtekind == RTE_JOIN) - { - /* Unnamed join has no refname */ - refname = NULL; - } - else - { - /* Otherwise use whatever the parser assigned */ - refname = rte->eref->aliasname; - } - - /* - * If the selected name isn't unique, append digits to make it so, and - * make a new hash entry for it once we've got a unique name. For a - * very long input name, we might have to truncate to stay within - * NAMEDATALEN. - */ - if (refname) - { - hentry = (NameHashEntry *) hash_search(names_hash, - refname, - HASH_ENTER, - &found); - if (found) - { - /* Name already in use, must choose a new one */ - int refnamelen = strlen(refname); - char *modname = (char *) palloc(refnamelen + 16); - NameHashEntry *hentry2; - - do - { - hentry->counter++; - for (;;) - { - /* - * We avoid using %.*s here because it can misbehave - * if the data is not valid in what libc thinks is the - * prevailing encoding. 
- */ - memcpy(modname, refname, refnamelen); - sprintf(modname + refnamelen, "_%d", hentry->counter); - if (strlen(modname) < NAMEDATALEN) - break; - /* drop chars from refname to keep all the digits */ - refnamelen = pg_mbcliplen(refname, refnamelen, - refnamelen - 1); - } - hentry2 = (NameHashEntry *) hash_search(names_hash, - modname, - HASH_ENTER, - &found); - } while (found); - hentry2->counter = 0; /* init new hash entry */ - refname = modname; - } - else - { - /* Name not previously used, need only initialize hentry */ - hentry->counter = 0; - } - } - - dpns->rtable_names = lappend(dpns->rtable_names, refname); - rtindex++; - } - - hash_destroy(names_hash); -} - -/* - * set_deparse_for_query: set up deparse_namespace for deparsing a Query tree - * - * For convenience, this is defined to initialize the deparse_namespace struct - * from scratch. - */ -static void -set_deparse_for_query(deparse_namespace *dpns, Query *query, - List *parent_namespaces) -{ - ListCell *lc; - ListCell *lc2; - - /* Initialize *dpns and fill rtable/ctes links */ - memset(dpns, 0, sizeof(deparse_namespace)); - dpns->rtable = query->rtable; - dpns->subplans = NIL; - dpns->ctes = query->cteList; - dpns->appendrels = NULL; - - /* Assign a unique relation alias to each RTE */ - set_rtable_names(dpns, parent_namespaces, NULL); - - /* Initialize dpns->rtable_columns to contain zeroed structs */ - dpns->rtable_columns = NIL; - while (list_length(dpns->rtable_columns) < list_length(dpns->rtable)) - dpns->rtable_columns = lappend(dpns->rtable_columns, - palloc0(sizeof(deparse_columns))); - - /* If it's a utility query, it won't have a jointree */ - if (query->jointree) - { - /* Detect whether global uniqueness of USING names is needed */ - dpns->unique_using = - has_dangerous_join_using(dpns, (Node *) query->jointree); - - /* - * Select names for columns merged by USING, via a recursive pass over - * the query jointree. 
- */ - set_using_names(dpns, (Node *) query->jointree, NIL); - } - - /* - * Now assign remaining column aliases for each RTE. We do this in a - * linear scan of the rtable, so as to process RTEs whether or not they - * are in the jointree (we mustn't miss NEW.*, INSERT target relations, - * etc). JOIN RTEs must be processed after their children, but this is - * okay because they appear later in the rtable list than their children - * (cf Asserts in identify_join_columns()). - */ - forboth(lc, dpns->rtable, lc2, dpns->rtable_columns) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - deparse_columns *colinfo = (deparse_columns *) lfirst(lc2); - - if (rte->rtekind == RTE_JOIN) - set_join_column_names(dpns, rte, colinfo); - else - set_relation_column_names(dpns, rte, colinfo); - } -} - -/* - * has_dangerous_join_using: search jointree for unnamed JOIN USING - * - * Merged columns of a JOIN USING may act differently from either of the input - * columns, either because they are merged with COALESCE (in a FULL JOIN) or - * because an implicit coercion of the underlying input column is required. - * In such a case the column must be referenced as a column of the JOIN not as - * a column of either input. And this is problematic if the join is unnamed - * (alias-less): we cannot qualify the column's name with an RTE name, since - * there is none. (Forcibly assigning an alias to the join is not a solution, - * since that will prevent legal references to tables below the join.) - * To ensure that every column in the query is unambiguously referenceable, - * we must assign such merged columns names that are globally unique across - * the whole query, aliasing other columns out of the way as necessary. - * - * Because the ensuing re-aliasing is fairly damaging to the readability of - * the query, we don't do this unless we have to. So, we must pre-scan - * the join tree to see if we have to, before starting set_using_names(). 
- */ -static bool -has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode) -{ - if (IsA(jtnode, RangeTblRef)) - { - /* nothing to do here */ - } - else if (IsA(jtnode, FromExpr)) - { - FromExpr *f = (FromExpr *) jtnode; - ListCell *lc; - - foreach(lc, f->fromlist) - { - if (has_dangerous_join_using(dpns, (Node *) lfirst(lc))) - return true; - } - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - - /* Is it an unnamed JOIN with USING? */ - if (j->alias == NULL && j->usingClause) - { - /* - * Yes, so check each join alias var to see if any of them are not - * simple references to underlying columns. If so, we have a - * dangerous situation and must pick unique aliases. - */ - RangeTblEntry *jrte = rt_fetch(j->rtindex, dpns->rtable); - - /* We need only examine the merged columns */ - for (int i = 0; i < jrte->joinmergedcols; i++) - { - Node *aliasvar = list_nth(jrte->joinaliasvars, i); - - if (!IsA(aliasvar, Var)) - return true; - } - } - - /* Nope, but inspect children */ - if (has_dangerous_join_using(dpns, j->larg)) - return true; - if (has_dangerous_join_using(dpns, j->rarg)) - return true; - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); - return false; -} - -/* - * set_using_names: select column aliases to be used for merged USING columns - * - * We do this during a recursive descent of the query jointree. - * dpns->unique_using must already be set to determine the global strategy. - * - * Column alias info is saved in the dpns->rtable_columns list, which is - * assumed to be filled with pre-zeroed deparse_columns structs. - * - * parentUsing is a list of all USING aliases assigned in parent joins of - * the current jointree node. (The passed-in list must not be modified.) 
- */ -static void -set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing) -{ - if (IsA(jtnode, RangeTblRef)) - { - /* nothing to do now */ - } - else if (IsA(jtnode, FromExpr)) - { - FromExpr *f = (FromExpr *) jtnode; - ListCell *lc; - - foreach(lc, f->fromlist) - set_using_names(dpns, (Node *) lfirst(lc), parentUsing); - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - RangeTblEntry *rte = rt_fetch(j->rtindex, dpns->rtable); - deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); - int *leftattnos; - int *rightattnos; - deparse_columns *leftcolinfo; - deparse_columns *rightcolinfo; - int i; - ListCell *lc; - - /* Get info about the shape of the join */ - identify_join_columns(j, rte, colinfo); - leftattnos = colinfo->leftattnos; - rightattnos = colinfo->rightattnos; - - /* Look up the not-yet-filled-in child deparse_columns structs */ - leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); - rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); - - /* - * If this join is unnamed, then we cannot substitute new aliases at - * this level, so any name requirements pushed down to here must be - * pushed down again to the children. - */ - if (rte->alias == NULL) - { - for (i = 0; i < colinfo->num_cols; i++) - { - char *colname = colinfo->colnames[i]; - - if (colname == NULL) - continue; - - /* Push down to left column, unless it's a system column */ - if (leftattnos[i] > 0) - { - expand_colnames_array_to(leftcolinfo, leftattnos[i]); - leftcolinfo->colnames[leftattnos[i] - 1] = colname; - } - - /* Same on the righthand side */ - if (rightattnos[i] > 0) - { - expand_colnames_array_to(rightcolinfo, rightattnos[i]); - rightcolinfo->colnames[rightattnos[i] - 1] = colname; - } - } - } - - /* - * If there's a USING clause, select the USING column names and push - * those names down to the children. 
We have two strategies: - * - * If dpns->unique_using is true, we force all USING names to be - * unique across the whole query level. In principle we'd only need - * the names of dangerous USING columns to be globally unique, but to - * safely assign all USING names in a single pass, we have to enforce - * the same uniqueness rule for all of them. However, if a USING - * column's name has been pushed down from the parent, we should use - * it as-is rather than making a uniqueness adjustment. This is - * necessary when we're at an unnamed join, and it creates no risk of - * ambiguity. Also, if there's a user-written output alias for a - * merged column, we prefer to use that rather than the input name; - * this simplifies the logic and seems likely to lead to less aliasing - * overall. - * - * If dpns->unique_using is false, we only need USING names to be - * unique within their own join RTE. We still need to honor - * pushed-down names, though. - * - * Though significantly different in results, these two strategies are - * implemented by the same code, with only the difference of whether - * to put assigned names into dpns->using_names. 
- */ - if (j->usingClause) - { - /* Copy the input parentUsing list so we don't modify it */ - parentUsing = list_copy(parentUsing); - - /* USING names must correspond to the first join output columns */ - expand_colnames_array_to(colinfo, list_length(j->usingClause)); - i = 0; - foreach(lc, j->usingClause) - { - char *colname = strVal(lfirst(lc)); - - /* Assert it's a merged column */ - Assert(leftattnos[i] != 0 && rightattnos[i] != 0); - - /* Adopt passed-down name if any, else select unique name */ - if (colinfo->colnames[i] != NULL) - colname = colinfo->colnames[i]; - else - { - /* Prefer user-written output alias if any */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - /* Make it appropriately unique */ - colname = make_colname_unique(colname, dpns, colinfo); - if (dpns->unique_using) - dpns->using_names = lappend(dpns->using_names, - colname); - /* Save it as output column name, too */ - colinfo->colnames[i] = colname; - } - - /* Remember selected names for use later */ - colinfo->usingNames = lappend(colinfo->usingNames, colname); - parentUsing = lappend(parentUsing, colname); - - /* Push down to left column, unless it's a system column */ - if (leftattnos[i] > 0) - { - expand_colnames_array_to(leftcolinfo, leftattnos[i]); - leftcolinfo->colnames[leftattnos[i] - 1] = colname; - } - - /* Same on the righthand side */ - if (rightattnos[i] > 0) - { - expand_colnames_array_to(rightcolinfo, rightattnos[i]); - rightcolinfo->colnames[rightattnos[i] - 1] = colname; - } - - i++; - } - } - - /* Mark child deparse_columns structs with correct parentUsing info */ - leftcolinfo->parentUsing = parentUsing; - rightcolinfo->parentUsing = parentUsing; - - /* Now recursively assign USING column names in children */ - set_using_names(dpns, j->larg, parentUsing); - set_using_names(dpns, j->rarg, parentUsing); - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); -} - -/* - * 
set_relation_column_names: select column aliases for a non-join RTE - * - * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. - * If any colnames entries are already filled in, those override local - * choices. - */ -static void -set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo) -{ - int ncolumns; - char **real_colnames; - bool changed_any; - bool has_anonymous; - int noldcolumns; - int i; - int j; - - /* - * Extract the RTE's "real" column names. This is comparable to - * get_rte_attribute_name, except that it's important to disregard dropped - * columns. We put NULL into the array for a dropped column. - */ - if (rte->rtekind == RTE_RELATION || - GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - /* Relation --- look to the system catalogs for up-to-date info */ - Relation rel; - TupleDesc tupdesc; - - rel = relation_open(rte->relid, AccessShareLock); - tupdesc = RelationGetDescr(rel); - - ncolumns = tupdesc->natts; - real_colnames = (char **) palloc(ncolumns * sizeof(char *)); - - for (i = 0; i < ncolumns; i++) - { - Form_pg_attribute attr = TupleDescAttr(tupdesc, i); - - if (attr->attisdropped) - real_colnames[i] = NULL; - else - real_colnames[i] = pstrdup(NameStr(attr->attname)); - } - relation_close(rel, AccessShareLock); - } - else - { - /* Otherwise use the column names from eref */ - ListCell *lc; - - ncolumns = list_length(rte->eref->colnames); - real_colnames = (char **) palloc(ncolumns * sizeof(char *)); - - i = 0; - foreach(lc, rte->eref->colnames) - { - /* - * If the column name shown in eref is an empty string, then it's - * a column that was dropped at the time of parsing the query, so - * treat it as dropped. - */ - char *cname = strVal(lfirst(lc)); - - if (cname[0] == '\0') - cname = NULL; - real_colnames[i] = cname; - i++; - } - } - - /* - * Ensure colinfo->colnames has a slot for each column. (It could be long - * enough already, if we pushed down a name for the last column.) 
Note: - * it's possible that there are now more columns than there were when the - * query was parsed, ie colnames could be longer than rte->eref->colnames. - * We must assign unique aliases to the new columns too, else there could - * be unresolved conflicts when the view/rule is reloaded. - */ - expand_colnames_array_to(colinfo, ncolumns); - Assert(colinfo->num_cols == ncolumns); - - /* - * Make sufficiently large new_colnames and is_new_col arrays, too. - * - * Note: because we leave colinfo->num_new_cols zero until after the loop, - * colname_is_unique will not consult that array, which is fine because it - * would only be duplicate effort. - */ - colinfo->new_colnames = (char **) palloc(ncolumns * sizeof(char *)); - colinfo->is_new_col = (bool *) palloc(ncolumns * sizeof(bool)); - - /* - * Scan the columns, select a unique alias for each one, and store it in - * colinfo->colnames and colinfo->new_colnames. The former array has NULL - * entries for dropped columns, the latter omits them. Also mark - * new_colnames entries as to whether they are new since parse time; this - * is the case for entries beyond the length of rte->eref->colnames. 
- */ - noldcolumns = list_length(rte->eref->colnames); - changed_any = false; - has_anonymous = false; - j = 0; - for (i = 0; i < ncolumns; i++) - { - char *real_colname = real_colnames[i]; - char *colname = colinfo->colnames[i]; - - /* Skip dropped columns */ - if (real_colname == NULL) - { - Assert(colname == NULL); /* colnames[i] is already NULL */ - continue; - } - - /* If alias already assigned, that's what to use */ - if (colname == NULL) - { - /* If user wrote an alias, prefer that over real column name */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - else - colname = real_colname; - - /* Unique-ify and insert into colinfo */ - colname = make_colname_unique(colname, dpns, colinfo); - - colinfo->colnames[i] = colname; - } - - /* Put names of non-dropped columns in new_colnames[] too */ - colinfo->new_colnames[j] = colname; - /* And mark them as new or not */ - colinfo->is_new_col[j] = (i >= noldcolumns); - j++; - - /* Remember if any assigned aliases differ from "real" name */ - if (!changed_any && strcmp(colname, real_colname) != 0) - changed_any = true; - - /* - * Remember if there is a reference to an anonymous column as named by - * char * FigureColname(Node *node) - */ - if (!has_anonymous && strcmp(real_colname, "?column?") == 0) - has_anonymous = true; - } - - /* - * Set correct length for new_colnames[] array. (Note: if columns have - * been added, colinfo->num_cols includes them, which is not really quite - * right but is harmless, since any new columns must be at the end where - * they won't affect varattnos of pre-existing columns.) - */ - colinfo->num_new_cols = j; - - /* - * For a relation RTE, we need only print the alias column names if any - * are different from the underlying "real" names. For a function RTE, - * always emit a complete column alias list; this is to protect against - * possible instability of the default column names (eg, from altering - * parameter names). 
For tablefunc RTEs, we never print aliases, because - * the column names are part of the clause itself. For other RTE types, - * print if we changed anything OR if there were user-written column - * aliases (since the latter would be part of the underlying "reality"). - */ - if (rte->rtekind == RTE_RELATION) - colinfo->printaliases = changed_any; - else if (rte->rtekind == RTE_FUNCTION) - colinfo->printaliases = true; - else if (rte->rtekind == RTE_TABLEFUNC) - colinfo->printaliases = false; - else if (rte->alias && rte->alias->colnames != NIL) - colinfo->printaliases = true; - else - colinfo->printaliases = changed_any || has_anonymous; -} - -/* - * set_join_column_names: select column aliases for a join RTE - * - * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. - * If any colnames entries are already filled in, those override local - * choices. Also, names for USING columns were already chosen by - * set_using_names(). We further expect that column alias selection has been - * completed for both input RTEs. - */ -static void -set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo) -{ - deparse_columns *leftcolinfo; - deparse_columns *rightcolinfo; - bool changed_any; - int noldcolumns; - int nnewcolumns; - Bitmapset *leftmerged = NULL; - Bitmapset *rightmerged = NULL; - int i; - int j; - int ic; - int jc; - - /* Look up the previously-filled-in child deparse_columns structs */ - leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); - rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); - - /* - * Ensure colinfo->colnames has a slot for each column. (It could be long - * enough already, if we pushed down a name for the last column.) Note: - * it's possible that one or both inputs now have more columns than there - * were when the query was parsed, but we'll deal with that below. We - * only need entries in colnames for pre-existing columns. 
- */ - noldcolumns = list_length(rte->eref->colnames); - expand_colnames_array_to(colinfo, noldcolumns); - Assert(colinfo->num_cols == noldcolumns); - - /* - * Scan the join output columns, select an alias for each one, and store - * it in colinfo->colnames. If there are USING columns, set_using_names() - * already selected their names, so we can start the loop at the first - * non-merged column. - */ - changed_any = false; - for (i = list_length(colinfo->usingNames); i < noldcolumns; i++) - { - char *colname = colinfo->colnames[i]; - char *real_colname; - - /* Join column must refer to at least one input column */ - Assert(colinfo->leftattnos[i] != 0 || colinfo->rightattnos[i] != 0); - - /* Get the child column name */ - if (colinfo->leftattnos[i] > 0) - real_colname = leftcolinfo->colnames[colinfo->leftattnos[i] - 1]; - else if (colinfo->rightattnos[i] > 0) - real_colname = rightcolinfo->colnames[colinfo->rightattnos[i] - 1]; - else - { - /* We're joining system columns --- use eref name */ - real_colname = strVal(list_nth(rte->eref->colnames, i)); - } - /* If child col has been dropped, no need to assign a join colname */ - if (real_colname == NULL) - { - colinfo->colnames[i] = NULL; - continue; - } - - /* In an unnamed join, just report child column names as-is */ - if (rte->alias == NULL) - { - colinfo->colnames[i] = real_colname; - continue; - } - - /* If alias already assigned, that's what to use */ - if (colname == NULL) - { - /* If user wrote an alias, prefer that over real column name */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - else - colname = real_colname; - - /* Unique-ify and insert into colinfo */ - colname = make_colname_unique(colname, dpns, colinfo); - - colinfo->colnames[i] = colname; - } - - /* Remember if any assigned aliases differ from "real" name */ - if (!changed_any && strcmp(colname, real_colname) != 0) - changed_any = true; - } - - /* - * Calculate number of 
columns the join would have if it were re-parsed - * now, and create storage for the new_colnames and is_new_col arrays. - * - * Note: colname_is_unique will be consulting new_colnames[] during the - * loops below, so its not-yet-filled entries must be zeroes. - */ - nnewcolumns = leftcolinfo->num_new_cols + rightcolinfo->num_new_cols - - list_length(colinfo->usingNames); - colinfo->num_new_cols = nnewcolumns; - colinfo->new_colnames = (char **) palloc0(nnewcolumns * sizeof(char *)); - colinfo->is_new_col = (bool *) palloc0(nnewcolumns * sizeof(bool)); - - /* - * Generating the new_colnames array is a bit tricky since any new columns - * added since parse time must be inserted in the right places. This code - * must match the parser, which will order a join's columns as merged - * columns first (in USING-clause order), then non-merged columns from the - * left input (in attnum order), then non-merged columns from the right - * input (ditto). If one of the inputs is itself a join, its columns will - * be ordered according to the same rule, which means newly-added columns - * might not be at the end. We can figure out what's what by consulting - * the leftattnos and rightattnos arrays plus the input is_new_col arrays. - * - * In these loops, i indexes leftattnos/rightattnos (so it's join varattno - * less one), j indexes new_colnames/is_new_col, and ic/jc have similar - * meanings for the current child RTE. 
- */ - - /* Handle merged columns; they are first and can't be new */ - i = j = 0; - while (i < noldcolumns && - colinfo->leftattnos[i] != 0 && - colinfo->rightattnos[i] != 0) - { - /* column name is already determined and known unique */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - colinfo->is_new_col[j] = false; - - /* build bitmapsets of child attnums of merged columns */ - if (colinfo->leftattnos[i] > 0) - leftmerged = bms_add_member(leftmerged, colinfo->leftattnos[i]); - if (colinfo->rightattnos[i] > 0) - rightmerged = bms_add_member(rightmerged, colinfo->rightattnos[i]); - - i++, j++; - } - - /* Handle non-merged left-child columns */ - ic = 0; - for (jc = 0; jc < leftcolinfo->num_new_cols; jc++) - { - char *child_colname = leftcolinfo->new_colnames[jc]; - - if (!leftcolinfo->is_new_col[jc]) - { - /* Advance ic to next non-dropped old column of left child */ - while (ic < leftcolinfo->num_cols && - leftcolinfo->colnames[ic] == NULL) - ic++; - Assert(ic < leftcolinfo->num_cols); - ic++; - /* If it is a merged column, we already processed it */ - if (bms_is_member(ic, leftmerged)) - continue; - /* Else, advance i to the corresponding existing join column */ - while (i < colinfo->num_cols && - colinfo->colnames[i] == NULL) - i++; - Assert(i < colinfo->num_cols); - Assert(ic == colinfo->leftattnos[i]); - /* Use the already-assigned name of this column */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - i++; - } - else - { - /* - * Unique-ify the new child column name and assign, unless we're - * in an unnamed join, in which case just copy - */ - if (rte->alias != NULL) - { - colinfo->new_colnames[j] = - make_colname_unique(child_colname, dpns, colinfo); - if (!changed_any && - strcmp(colinfo->new_colnames[j], child_colname) != 0) - changed_any = true; - } - else - colinfo->new_colnames[j] = child_colname; - } - - colinfo->is_new_col[j] = leftcolinfo->is_new_col[jc]; - j++; - } - - /* Handle non-merged right-child columns in exactly the same way */ - 
ic = 0; - for (jc = 0; jc < rightcolinfo->num_new_cols; jc++) - { - char *child_colname = rightcolinfo->new_colnames[jc]; - - if (!rightcolinfo->is_new_col[jc]) - { - /* Advance ic to next non-dropped old column of right child */ - while (ic < rightcolinfo->num_cols && - rightcolinfo->colnames[ic] == NULL) - ic++; - Assert(ic < rightcolinfo->num_cols); - ic++; - /* If it is a merged column, we already processed it */ - if (bms_is_member(ic, rightmerged)) - continue; - /* Else, advance i to the corresponding existing join column */ - while (i < colinfo->num_cols && - colinfo->colnames[i] == NULL) - i++; - Assert(i < colinfo->num_cols); - Assert(ic == colinfo->rightattnos[i]); - /* Use the already-assigned name of this column */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - i++; - } - else - { - /* - * Unique-ify the new child column name and assign, unless we're - * in an unnamed join, in which case just copy - */ - if (rte->alias != NULL) - { - colinfo->new_colnames[j] = - make_colname_unique(child_colname, dpns, colinfo); - if (!changed_any && - strcmp(colinfo->new_colnames[j], child_colname) != 0) - changed_any = true; - } - else - colinfo->new_colnames[j] = child_colname; - } - - colinfo->is_new_col[j] = rightcolinfo->is_new_col[jc]; - j++; - } - - /* Assert we processed the right number of columns */ -#ifdef USE_ASSERT_CHECKING - while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) - i++; - Assert(i == colinfo->num_cols); - Assert(j == nnewcolumns); -#endif - - /* - * For a named join, print column aliases if we changed any from the child - * names. Unnamed joins cannot print aliases. - */ - if (rte->alias != NULL) - colinfo->printaliases = changed_any; - else - colinfo->printaliases = false; -} - -/* - * colname_is_unique: is colname distinct from already-chosen column names? 
- * - * dpns is query-wide info, colinfo is for the column's RTE - */ -static bool -colname_is_unique(const char *colname, deparse_namespace *dpns, - deparse_columns *colinfo) -{ - int i; - ListCell *lc; - - /* Check against already-assigned column aliases within RTE */ - for (i = 0; i < colinfo->num_cols; i++) - { - char *oldname = colinfo->colnames[i]; - - if (oldname && strcmp(oldname, colname) == 0) - return false; - } - - /* - * If we're building a new_colnames array, check that too (this will be - * partially but not completely redundant with the previous checks) - */ - for (i = 0; i < colinfo->num_new_cols; i++) - { - char *oldname = colinfo->new_colnames[i]; - - if (oldname && strcmp(oldname, colname) == 0) - return false; - } - - /* Also check against USING-column names that must be globally unique */ - foreach(lc, dpns->using_names) - { - char *oldname = (char *) lfirst(lc); - - if (strcmp(oldname, colname) == 0) - return false; - } - - /* Also check against names already assigned for parent-join USING cols */ - foreach(lc, colinfo->parentUsing) - { - char *oldname = (char *) lfirst(lc); - - if (strcmp(oldname, colname) == 0) - return false; - } - - return true; -} - -/* - * make_colname_unique: modify colname if necessary to make it unique - * - * dpns is query-wide info, colinfo is for the column's RTE - */ -static char * -make_colname_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo) -{ - /* - * If the selected name isn't unique, append digits to make it so. For a - * very long input name, we might have to truncate to stay within - * NAMEDATALEN. - */ - if (!colname_is_unique(colname, dpns, colinfo)) - { - int colnamelen = strlen(colname); - char *modname = (char *) palloc(colnamelen + 16); - int i = 0; - - do - { - i++; - for (;;) - { - /* - * We avoid using %.*s here because it can misbehave if the - * data is not valid in what libc thinks is the prevailing - * encoding. 
- */ - memcpy(modname, colname, colnamelen); - sprintf(modname + colnamelen, "_%d", i); - if (strlen(modname) < NAMEDATALEN) - break; - /* drop chars from colname to keep all the digits */ - colnamelen = pg_mbcliplen(colname, colnamelen, - colnamelen - 1); - } - } while (!colname_is_unique(modname, dpns, colinfo)); - colname = modname; - } - return colname; -} - -/* - * expand_colnames_array_to: make colinfo->colnames at least n items long - * - * Any added array entries are initialized to zero. - */ -static void -expand_colnames_array_to(deparse_columns *colinfo, int n) -{ - if (n > colinfo->num_cols) - { - if (colinfo->colnames == NULL) - colinfo->colnames = (char **) palloc0(n * sizeof(char *)); - else - { - colinfo->colnames = (char **) repalloc(colinfo->colnames, - n * sizeof(char *)); - memset(colinfo->colnames + colinfo->num_cols, 0, - (n - colinfo->num_cols) * sizeof(char *)); - } - colinfo->num_cols = n; - } -} - -/* - * identify_join_columns: figure out where columns of a join come from - * - * Fills the join-specific fields of the colinfo struct, except for - * usingNames which is filled later. 
- */ -static void -identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, - deparse_columns *colinfo) -{ - int numjoincols; - int jcolno; - int rcolno; - ListCell *lc; - - /* Extract left/right child RT indexes */ - if (IsA(j->larg, RangeTblRef)) - colinfo->leftrti = ((RangeTblRef *) j->larg)->rtindex; - else if (IsA(j->larg, JoinExpr)) - colinfo->leftrti = ((JoinExpr *) j->larg)->rtindex; - else - elog(ERROR, "unrecognized node type in jointree: %d", - (int) nodeTag(j->larg)); - if (IsA(j->rarg, RangeTblRef)) - colinfo->rightrti = ((RangeTblRef *) j->rarg)->rtindex; - else if (IsA(j->rarg, JoinExpr)) - colinfo->rightrti = ((JoinExpr *) j->rarg)->rtindex; - else - elog(ERROR, "unrecognized node type in jointree: %d", - (int) nodeTag(j->rarg)); - - /* Assert children will be processed earlier than join in second pass */ - Assert(colinfo->leftrti < j->rtindex); - Assert(colinfo->rightrti < j->rtindex); - - /* Initialize result arrays with zeroes */ - numjoincols = list_length(jrte->joinaliasvars); - Assert(numjoincols == list_length(jrte->eref->colnames)); - colinfo->leftattnos = (int *) palloc0(numjoincols * sizeof(int)); - colinfo->rightattnos = (int *) palloc0(numjoincols * sizeof(int)); - - /* - * Deconstruct RTE's joinleftcols/joinrightcols into desired format. - * Recall that the column(s) merged due to USING are the first column(s) - * of the join output. We need not do anything special while scanning - * joinleftcols, but while scanning joinrightcols we must distinguish - * merged from unmerged columns. - */ - jcolno = 0; - foreach(lc, jrte->joinleftcols) - { - int leftattno = lfirst_int(lc); - - colinfo->leftattnos[jcolno++] = leftattno; - } - rcolno = 0; - foreach(lc, jrte->joinrightcols) - { - int rightattno = lfirst_int(lc); - - if (rcolno < jrte->joinmergedcols) /* merged column? 
*/ - colinfo->rightattnos[rcolno] = rightattno; - else - colinfo->rightattnos[jcolno++] = rightattno; - rcolno++; - } - Assert(jcolno == numjoincols); -} - -/* - * get_rtable_name: convenience function to get a previously assigned RTE alias - * - * The RTE must belong to the topmost namespace level in "context". - */ -static char * -get_rtable_name(int rtindex, deparse_context *context) -{ - deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); - - Assert(rtindex > 0 && rtindex <= list_length(dpns->rtable_names)); - return (char *) list_nth(dpns->rtable_names, rtindex - 1); -} - -/* - * set_deparse_plan: set up deparse_namespace to parse subexpressions - * of a given Plan node - * - * This sets the plan, outer_planstate, inner_planstate, outer_tlist, - * inner_tlist, and index_tlist fields. Caller is responsible for adjusting - * the ancestors list if necessary. Note that the rtable and ctes fields do - * not need to change when shifting attention to different plan nodes in a - * single plan tree. - */ -static void -set_deparse_plan(deparse_namespace *dpns, Plan *plan) -{ - dpns->plan = plan; - - /* - * We special-case Append and MergeAppend to pretend that the first child - * plan is the OUTER referent; we have to interpret OUTER Vars in their - * tlists according to one of the children, and the first one is the most - * natural choice. Likewise special-case ModifyTable to pretend that the - * first child plan is the OUTER referent; this is to support RETURNING - * lists containing references to non-target relations. 
- */ - if (IsA(plan, Append)) - dpns->outer_plan = linitial(((Append *) plan)->appendplans); - else if (IsA(plan, MergeAppend)) - dpns->outer_plan = linitial(((MergeAppend *) plan)->mergeplans); - else if (IsA(plan, ModifyTable)) - dpns->outer_plan = linitial(((ModifyTable *) plan)->plans); - else - dpns->outer_plan = outerPlan(plan); - - if (dpns->outer_plan) - dpns->outer_tlist = dpns->outer_plan->targetlist; - else - dpns->outer_tlist = NIL; - - /* - * For a SubqueryScan, pretend the subplan is INNER referent. (We don't - * use OUTER because that could someday conflict with the normal meaning.) - * Likewise, for a CteScan, pretend the subquery's plan is INNER referent. - * For ON CONFLICT .. UPDATE we just need the inner tlist to point to the - * excluded expression's tlist. (Similar to the SubqueryScan we don't want - * to reuse OUTER, it's used for RETURNING in some modify table cases, - * although not INSERT .. CONFLICT). - */ - if (IsA(plan, SubqueryScan)) - dpns->inner_plan = ((SubqueryScan *) plan)->subplan; - else if (IsA(plan, CteScan)) - dpns->inner_plan = list_nth(dpns->subplans, - ((CteScan *) plan)->ctePlanId - 1); - else if (IsA(plan, ModifyTable)) - dpns->inner_plan = plan; - else - dpns->inner_plan = innerPlan(plan); - - if (IsA(plan, ModifyTable)) - dpns->inner_tlist = ((ModifyTable *) plan)->exclRelTlist; - else if (dpns->inner_plan) - dpns->inner_tlist = dpns->inner_plan->targetlist; - else - dpns->inner_tlist = NIL; - - /* Set up referent for INDEX_VAR Vars, if needed */ - if (IsA(plan, IndexOnlyScan)) - dpns->index_tlist = ((IndexOnlyScan *) plan)->indextlist; - else if (IsA(plan, ForeignScan)) - dpns->index_tlist = ((ForeignScan *) plan)->fdw_scan_tlist; - else if (IsA(plan, CustomScan)) - dpns->index_tlist = ((CustomScan *) plan)->custom_scan_tlist; - else - dpns->index_tlist = NIL; -} - -/* - * push_child_plan: temporarily transfer deparsing attention to a child plan - * - * When expanding an OUTER_VAR or INNER_VAR reference, we must 
adjust the - * deparse context in case the referenced expression itself uses - * OUTER_VAR/INNER_VAR. We modify the top stack entry in-place to avoid - * affecting levelsup issues (although in a Plan tree there really shouldn't - * be any). - * - * Caller must provide a local deparse_namespace variable to save the - * previous state for pop_child_plan. - */ -static void -push_child_plan(deparse_namespace *dpns, Plan *plan, - deparse_namespace *save_dpns) -{ - /* Save state for restoration later */ - *save_dpns = *dpns; - - /* Link current plan node into ancestors list */ - dpns->ancestors = lcons(dpns->plan, dpns->ancestors); - - /* Set attention on selected child */ - set_deparse_plan(dpns, plan); -} - -/* - * pop_child_plan: undo the effects of push_child_plan - */ -static void -pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) -{ - List *ancestors; - - /* Get rid of ancestors list cell added by push_child_plan */ - ancestors = list_delete_first(dpns->ancestors); - - /* Restore fields changed by push_child_plan */ - *dpns = *save_dpns; - - /* Make sure dpns->ancestors is right (may be unnecessary) */ - dpns->ancestors = ancestors; -} - -/* - * push_ancestor_plan: temporarily transfer deparsing attention to an - * ancestor plan - * - * When expanding a Param reference, we must adjust the deparse context - * to match the plan node that contains the expression being printed; - * otherwise we'd fail if that expression itself contains a Param or - * OUTER_VAR/INNER_VAR/INDEX_VAR variable. - * - * The target ancestor is conveniently identified by the ListCell holding it - * in dpns->ancestors. - * - * Caller must provide a local deparse_namespace variable to save the - * previous state for pop_ancestor_plan. 
- */ -static void -push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, - deparse_namespace *save_dpns) -{ - Plan *plan = (Plan *) lfirst(ancestor_cell); - - /* Save state for restoration later */ - *save_dpns = *dpns; - - /* Build a new ancestor list with just this node's ancestors */ - dpns->ancestors = - list_copy_tail(dpns->ancestors, - list_cell_number(dpns->ancestors, ancestor_cell) + 1); - - /* Set attention on selected ancestor */ - set_deparse_plan(dpns, plan); -} - -/* - * pop_ancestor_plan: undo the effects of push_ancestor_plan - */ -static void -pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) -{ - /* Free the ancestor list made in push_ancestor_plan */ - list_free(dpns->ancestors); - - /* Restore fields changed by push_ancestor_plan */ - *dpns = *save_dpns; -} - - -/* ---------- - * deparse_shard_query - Parse back a query for execution on a shard - * - * Builds an SQL string to perform the provided query on a specific shard and - * places this string into the provided buffer. - * ---------- - */ -void -deparse_shard_query(Query *query, Oid distrelid, int64 shardid, - StringInfo buffer) -{ - get_query_def_extended(query, buffer, NIL, distrelid, shardid, NULL, 0, - WRAP_COLUMN_DEFAULT, 0); -} - - -/* ---------- - * get_query_def - Parse back one query parsetree - * - * If resultDesc is not NULL, then it is the output tuple descriptor for - * the view represented by a SELECT query. - * ---------- - */ -static void -get_query_def(Query *query, StringInfo buf, List *parentnamespace, - TupleDesc resultDesc, - int prettyFlags, int wrapColumn, int startIndent) -{ - get_query_def_extended(query, buf, parentnamespace, InvalidOid, 0, resultDesc, - prettyFlags, wrapColumn, startIndent); -} - - -/* ---------- - * get_query_def_extended - Parse back one query parsetree, optionally - * with extension using a shard identifier. 
- * - * If distrelid is valid and shardid is positive, the provided shardid is added - * any time the provided relid is deparsed, so that the query may be executed - * on a placement for the given shard. - * ---------- - */ -static void -get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace, - Oid distrelid, int64 shardid, TupleDesc resultDesc, - int prettyFlags, int wrapColumn, int startIndent) -{ - deparse_context context; - deparse_namespace dpns; - - OverrideSearchPath *overridePath = NULL; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - /* - * Before we begin to examine the query, acquire locks on referenced - * relations, and fix up deleted columns in JOIN RTEs. This ensures - * consistent results. Note we assume it's OK to scribble on the passed - * querytree! - * - * We are only deparsing the query (we are not about to execute it), so we - * only need AccessShareLock on the relations it mentions. - */ - AcquireRewriteLocks(query, false, false); - - /* - * Set search_path to NIL so that all objects outside of pg_catalog will be - * schema-prefixed. 
pg_catalog will be added automatically when we call - * PushOverrideSearchPath(), since we set addCatalog to true; - */ - overridePath = GetOverrideSearchPath(CurrentMemoryContext); - overridePath->schemas = NIL; - overridePath->addCatalog = true; - PushOverrideSearchPath(overridePath); - - context.buf = buf; - context.namespaces = lcons(&dpns, list_copy(parentnamespace)); - context.windowClause = NIL; - context.windowTList = NIL; - context.varprefix = (parentnamespace != NIL || - list_length(query->rtable) != 1); - context.prettyFlags = prettyFlags; - context.wrapColumn = wrapColumn; - context.indentLevel = startIndent; - context.special_exprkind = EXPR_KIND_NONE; - context.appendparents = NULL; - context.distrelid = distrelid; - context.shardid = shardid; - - set_deparse_for_query(&dpns, query, parentnamespace); - - switch (query->commandType) - { - case CMD_SELECT: - get_select_query_def(query, &context, resultDesc); - break; - - case CMD_UPDATE: - get_update_query_def(query, &context); - break; - - case CMD_INSERT: - get_insert_query_def(query, &context); - break; - - case CMD_DELETE: - get_delete_query_def(query, &context); - break; - - case CMD_NOTHING: - appendStringInfoString(buf, "NOTHING"); - break; - - case CMD_UTILITY: - get_utility_query_def(query, &context); - break; - - default: - elog(ERROR, "unrecognized query command type: %d", - query->commandType); - break; - } - - /* revert back to original search_path */ - PopOverrideSearchPath(); -} - -/* ---------- - * get_values_def - Parse back a VALUES list - * ---------- - */ -static void -get_values_def(List *values_lists, deparse_context *context) -{ - StringInfo buf = context->buf; - bool first_list = true; - ListCell *vtl; - - appendStringInfoString(buf, "VALUES "); - - foreach(vtl, values_lists) - { - List *sublist = (List *) lfirst(vtl); - bool first_col = true; - ListCell *lc; - - if (first_list) - first_list = false; - else - appendStringInfoString(buf, ", "); - - appendStringInfoChar(buf, '('); 
- foreach(lc, sublist) - { - Node *col = (Node *) lfirst(lc); - - if (first_col) - first_col = false; - else - appendStringInfoChar(buf, ','); - - /* - * Print the value. Whole-row Vars need special treatment. - */ - get_rule_expr_toplevel(col, context, false); - } - appendStringInfoChar(buf, ')'); - } -} - -/* ---------- - * get_with_clause - Parse back a WITH clause - * ---------- - */ -static void -get_with_clause(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - if (query->cteList == NIL) - return; - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - - if (query->hasRecursive) - sep = "WITH RECURSIVE "; - else - sep = "WITH "; - foreach(l, query->cteList) - { - CommonTableExpr *cte = (CommonTableExpr *) lfirst(l); - - appendStringInfoString(buf, sep); - appendStringInfoString(buf, quote_identifier(cte->ctename)); - if (cte->aliascolnames) - { - bool first = true; - ListCell *col; - - appendStringInfoChar(buf, '('); - foreach(col, cte->aliascolnames) - { - if (first) - first = false; - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, - quote_identifier(strVal(lfirst(col)))); - } - appendStringInfoChar(buf, ')'); - } - appendStringInfoString(buf, " AS "); - switch (cte->ctematerialized) - { - case CTEMaterializeDefault: - break; - case CTEMaterializeAlways: - appendStringInfoString(buf, "MATERIALIZED "); - break; - case CTEMaterializeNever: - appendStringInfoString(buf, "NOT MATERIALIZED "); - break; - } - appendStringInfoChar(buf, '('); - if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", 0, 0, 0); - get_query_def((Query *) cte->ctequery, buf, context->namespaces, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", 0, 0, 0); - appendStringInfoChar(buf, ')'); - sep = ", "; - } - - if (PRETTY_INDENT(context)) - 
{ - context->indentLevel -= PRETTYINDENT_STD; - appendContextKeyword(context, "", 0, 0, 0); - } - else - appendStringInfoChar(buf, ' '); -} - -/* ---------- - * get_select_query_def - Parse back a SELECT parsetree - * ---------- - */ -static void -get_select_query_def(Query *query, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - List *save_windowclause; - List *save_windowtlist; - bool force_colno; - ListCell *l; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* Set up context for possible window functions */ - save_windowclause = context->windowClause; - context->windowClause = query->windowClause; - save_windowtlist = context->windowTList; - context->windowTList = query->targetList; - - /* - * If the Query node has a setOperations tree, then it's the top level of - * a UNION/INTERSECT/EXCEPT query; only the WITH, ORDER BY and LIMIT - * fields are interesting in the top query itself. - */ - if (query->setOperations) - { - get_setop_query(query->setOperations, query, context, resultDesc); - /* ORDER BY clauses must be simple in this case */ - force_colno = true; - } - else - { - get_basic_select_query(query, context, resultDesc); - force_colno = false; - } - - /* Add the ORDER BY clause if given */ - if (query->sortClause != NIL) - { - appendContextKeyword(context, " ORDER BY ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_orderby(query->sortClause, query->targetList, - force_colno, context); - } - - /* - * Add the LIMIT/OFFSET clauses if given. If non-default options, use the - * standard spelling of LIMIT. 
- */ - if (query->limitOffset != NULL) - { - appendContextKeyword(context, " OFFSET ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->limitOffset, context, false); - } - if (query->limitCount != NULL) - { - if (query->limitOption == LIMIT_OPTION_WITH_TIES) - { - // had to add '(' and ')' here because it fails with casting - appendContextKeyword(context, " FETCH FIRST (", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->limitCount, context, false); - appendStringInfo(buf, ") ROWS WITH TIES"); - } - else - { - appendContextKeyword(context, " LIMIT ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - if (IsA(query->limitCount, Const) && - ((Const *) query->limitCount)->constisnull) - appendStringInfoString(buf, "ALL"); - else - get_rule_expr(query->limitCount, context, false); - } - } - - /* Add FOR [KEY] UPDATE/SHARE clauses if present */ - if (query->hasForUpdate) - { - foreach(l, query->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(l); - - /* don't print implicit clauses */ - if (rc->pushedDown) - continue; - - switch (rc->strength) - { - case LCS_NONE: - /* we intentionally throw an error for LCS_NONE */ - elog(ERROR, "unrecognized LockClauseStrength %d", - (int) rc->strength); - break; - case LCS_FORKEYSHARE: - appendContextKeyword(context, " FOR KEY SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORSHARE: - appendContextKeyword(context, " FOR SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORNOKEYUPDATE: - appendContextKeyword(context, " FOR NO KEY UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORUPDATE: - appendContextKeyword(context, " FOR UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - } - - appendStringInfo(buf, " OF %s", - quote_identifier(get_rtable_name(rc->rti, - context))); - if (rc->waitPolicy == LockWaitError) - appendStringInfoString(buf, " NOWAIT"); - else if (rc->waitPolicy == LockWaitSkip) - 
appendStringInfoString(buf, " SKIP LOCKED"); - } - } - - context->windowClause = save_windowclause; - context->windowTList = save_windowtlist; -} - -/* - * Detect whether query looks like SELECT ... FROM VALUES(); - * if so, return the VALUES RTE. Otherwise return NULL. - */ -static RangeTblEntry * -get_simple_values_rte(Query *query, TupleDesc resultDesc) -{ - RangeTblEntry *result = NULL; - ListCell *lc; - int colno; - - /* - * We want to return true even if the Query also contains OLD or NEW rule - * RTEs. So the idea is to scan the rtable and see if there is only one - * inFromCl RTE that is a VALUES RTE. - */ - foreach(lc, query->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - - if (rte->rtekind == RTE_VALUES && rte->inFromCl) - { - if (result) - return NULL; /* multiple VALUES (probably not possible) */ - result = rte; - } - else if (rte->rtekind == RTE_RELATION && !rte->inFromCl) - continue; /* ignore rule entries */ - else - return NULL; /* something else -> not simple VALUES */ - } - - /* - * We don't need to check the targetlist in any great detail, because - * parser/analyze.c will never generate a "bare" VALUES RTE --- they only - * appear inside auto-generated sub-queries with very restricted - * structure. However, DefineView might have modified the tlist by - * injecting new column aliases; so compare tlist resnames against the - * RTE's names to detect that. 
- */ - if (result) - { - ListCell *lcn; - - if (list_length(query->targetList) != list_length(result->eref->colnames)) - return NULL; /* this probably cannot happen */ - colno = 0; - forboth(lc, query->targetList, lcn, result->eref->colnames) - { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - char *cname = strVal(lfirst(lcn)); - char *colname; - - if (tle->resjunk) - return NULL; /* this probably cannot happen */ - /* compute name that get_target_list would use for column */ - colno++; - if (resultDesc && colno <= resultDesc->natts) - colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname); - else - colname = tle->resname; - - /* does it match the VALUES RTE? */ - if (colname == NULL || strcmp(colname, cname) != 0) - return NULL; /* column name has been changed */ - } - } - - return result; -} - -static void -get_basic_select_query(Query *query, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - RangeTblEntry *values_rte; - char *sep; - ListCell *l; - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - - /* - * If the query looks like SELECT * FROM (VALUES ...), then print just the - * VALUES part. This reverses what transformValuesClause() did at parse - * time. 
- */ - values_rte = get_simple_values_rte(query, resultDesc); - if (values_rte) - { - get_values_def(values_rte->values_lists, context); - return; - } - - /* - * Build up the query string - first we say SELECT - */ - appendStringInfoString(buf, "SELECT"); - - /* Add the DISTINCT clause if given */ - if (query->distinctClause != NIL) - { - if (query->hasDistinctOn) - { - appendStringInfoString(buf, " DISTINCT ON ("); - sep = ""; - foreach(l, query->distinctClause) - { - SortGroupClause *srt = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(srt->tleSortGroupRef, query->targetList, - false, context); - sep = ", "; - } - appendStringInfoChar(buf, ')'); - } - else - appendStringInfoString(buf, " DISTINCT"); - } - - /* Then we tell what to select (the targetlist) */ - get_target_list(query->targetList, context, resultDesc); - - /* Add the FROM clause if needed */ - get_from_clause(query, " FROM ", context); - - /* Add the WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add the GROUP BY clause if given */ - if (query->groupClause != NULL || query->groupingSets != NULL) - { - ParseExprKind save_exprkind; - - appendContextKeyword(context, " GROUP BY ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - - save_exprkind = context->special_exprkind; - context->special_exprkind = EXPR_KIND_GROUP_BY; - - if (query->groupingSets == NIL) - { - sep = ""; - foreach(l, query->groupClause) - { - SortGroupClause *grp = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, query->targetList, - false, context); - sep = ", "; - } - } - else - { - sep = ""; - foreach(l, query->groupingSets) - { - GroupingSet *grp = lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_groupingset(grp, query->targetList, true, 
context); - sep = ", "; - } - } - - context->special_exprkind = save_exprkind; - } - - /* Add the HAVING clause if given */ - if (query->havingQual != NULL) - { - appendContextKeyword(context, " HAVING ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->havingQual, context, false); - } - - /* Add the WINDOW clause if needed */ - if (query->windowClause != NIL) - get_rule_windowclause(query, context); -} - -/* ---------- - * get_target_list - Parse back a SELECT target list - * - * This is also used for RETURNING lists in INSERT/UPDATE/DELETE. - * ---------- - */ -static void -get_target_list(List *targetList, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - StringInfoData targetbuf; - bool last_was_multiline = false; - char *sep; - int colno; - ListCell *l; - - /* we use targetbuf to hold each TLE's text temporarily */ - initStringInfo(&targetbuf); - - sep = " "; - colno = 0; - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - char *colname; - char *attname; - - if (tle->resjunk) - continue; /* ignore junk entries */ - - appendStringInfoString(buf, sep); - sep = ", "; - colno++; - - /* - * Put the new field text into targetbuf so we can decide after we've - * got it whether or not it needs to go on a new line. - */ - resetStringInfo(&targetbuf); - context->buf = &targetbuf; - - /* - * We special-case Var nodes rather than using get_rule_expr. This is - * needed because get_rule_expr will display a whole-row Var as - * "foo.*", which is the preferred notation in most contexts, but at - * the top level of a SELECT list it's not right (the parser will - * expand that notation into multiple columns, yielding behavior - * different from a whole-row Var). We need to call get_variable - * directly so that we can tell it to do the right thing, and so that - * we can get the attribute name which is the default AS label. 
- */ - if (tle->expr && (IsA(tle->expr, Var))) - { - attname = get_variable((Var *) tle->expr, 0, true, context); - } - else - { - get_rule_expr((Node *) tle->expr, context, true); - /* We'll show the AS name unless it's this: */ - attname = "?column?"; - } - - /* - * Figure out what the result column should be called. In the context - * of a view, use the view's tuple descriptor (so as to pick up the - * effects of any column RENAME that's been done on the view). - * Otherwise, just use what we can find in the TLE. - */ - if (resultDesc && colno <= resultDesc->natts) - colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname); - else - colname = tle->resname; - - /* Show AS unless the column's name is correct as-is */ - if (colname) /* resname could be NULL */ - { - if (attname == NULL || strcmp(attname, colname) != 0) - appendStringInfo(&targetbuf, " AS %s", quote_identifier(colname)); - } - - /* Restore context's output buffer */ - context->buf = buf; - - /* Consider line-wrapping if enabled */ - if (PRETTY_INDENT(context) && context->wrapColumn >= 0) - { - int leading_nl_pos; - - /* Does the new field start with a new line? */ - if (targetbuf.len > 0 && targetbuf.data[0] == '\n') - leading_nl_pos = 0; - else - leading_nl_pos = -1; - - /* If so, we shouldn't add anything */ - if (leading_nl_pos >= 0) - { - /* instead, remove any trailing spaces currently in buf */ - removeStringInfoSpaces(buf); - } - else - { - char *trailing_nl; - - /* Locate the start of the current line in the output buffer */ - trailing_nl = strrchr(buf->data, '\n'); - if (trailing_nl == NULL) - trailing_nl = buf->data; - else - trailing_nl++; - - /* - * Add a newline, plus some indentation, if the new field is - * not the first and either the new field would cause an - * overflow or the last field used more than one line. 
- */ - if (colno > 1 && - ((strlen(trailing_nl) + targetbuf.len > context->wrapColumn) || - last_was_multiline)) - appendContextKeyword(context, "", -PRETTYINDENT_STD, - PRETTYINDENT_STD, PRETTYINDENT_VAR); - } - - /* Remember this field's multiline status for next iteration */ - last_was_multiline = - (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL); - } - - /* Add the new field */ - appendStringInfoString(buf, targetbuf.data); - } - - /* clean up */ - pfree(targetbuf.data); -} - -static void -get_setop_query(Node *setOp, Query *query, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - bool need_paren; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - if (IsA(setOp, RangeTblRef)) - { - RangeTblRef *rtr = (RangeTblRef *) setOp; - RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable); - Query *subquery = rte->subquery; - - Assert(subquery != NULL); - Assert(subquery->setOperations == NULL); - /* Need parens if WITH, ORDER BY, FOR UPDATE, or LIMIT; see gram.y */ - need_paren = (subquery->cteList || - subquery->sortClause || - subquery->rowMarks || - subquery->limitOffset || - subquery->limitCount); - if (need_paren) - appendStringInfoChar(buf, '('); - get_query_def(subquery, buf, context->namespaces, resultDesc, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - if (need_paren) - appendStringInfoChar(buf, ')'); - } - else if (IsA(setOp, SetOperationStmt)) - { - SetOperationStmt *op = (SetOperationStmt *) setOp; - int subindent; - - /* - * We force parens when nesting two SetOperationStmts, except when the - * lefthand input is another setop of the same kind. Syntactically, - * we could omit parens in rather more cases, but it seems best to use - * parens to flag cases where the setop operator changes. If we use - * parens, we also increase the indentation level for the child query. 
- * - * There are some cases in which parens are needed around a leaf query - * too, but those are more easily handled at the next level down (see - * code above). - */ - if (IsA(op->larg, SetOperationStmt)) - { - SetOperationStmt *lop = (SetOperationStmt *) op->larg; - - if (op->op == lop->op && op->all == lop->all) - need_paren = false; - else - need_paren = true; - } - else - need_paren = false; - - if (need_paren) - { - appendStringInfoChar(buf, '('); - subindent = PRETTYINDENT_STD; - appendContextKeyword(context, "", subindent, 0, 0); - } - else - subindent = 0; - - get_setop_query(op->larg, query, context, resultDesc); - - if (need_paren) - appendContextKeyword(context, ") ", -subindent, 0, 0); - else if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", -subindent, 0, 0); - else - appendStringInfoChar(buf, ' '); - - switch (op->op) - { - case SETOP_UNION: - appendStringInfoString(buf, "UNION "); - break; - case SETOP_INTERSECT: - appendStringInfoString(buf, "INTERSECT "); - break; - case SETOP_EXCEPT: - appendStringInfoString(buf, "EXCEPT "); - break; - default: - elog(ERROR, "unrecognized set op: %d", - (int) op->op); - } - if (op->all) - appendStringInfoString(buf, "ALL "); - - /* Always parenthesize if RHS is another setop */ - need_paren = IsA(op->rarg, SetOperationStmt); - - /* - * The indentation code here is deliberately a bit different from that - * for the lefthand input, because we want the line breaks in - * different places. - */ - if (need_paren) - { - appendStringInfoChar(buf, '('); - subindent = PRETTYINDENT_STD; - } - else - subindent = 0; - appendContextKeyword(context, "", subindent, 0, 0); - - get_setop_query(op->rarg, query, context, resultDesc); - - if (PRETTY_INDENT(context)) - context->indentLevel -= subindent; - if (need_paren) - appendContextKeyword(context, ")", 0, 0, 0); - } - else - { - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(setOp)); - } -} - -/* - * Display a sort/group clause. 
- * - * Also returns the expression tree, so caller need not find it again. - */ -static Node * -get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno, - deparse_context *context) -{ - StringInfo buf = context->buf; - TargetEntry *tle; - Node *expr; - - tle = get_sortgroupref_tle(ref, tlist); - expr = (Node *) tle->expr; - - /* - * Use column-number form if requested by caller. Otherwise, if - * expression is a constant, force it to be dumped with an explicit cast - * as decoration --- this is because a simple integer constant is - * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we - * dump it without any decoration. If it's anything more complex than a - * simple Var, then force extra parens around it, to ensure it can't be - * misinterpreted as a cube() or rollup() construct. - */ - if (force_colno) - { - Assert(!tle->resjunk); - appendStringInfo(buf, "%d", tle->resno); - } - else if (expr && IsA(expr, Const)) - get_const_expr((Const *) expr, context, 1); - else if (!expr || IsA(expr, Var)) - get_rule_expr(expr, context, true); - else - { - /* - * We must force parens for function-like expressions even if - * PRETTY_PAREN is off, since those are the ones in danger of - * misparsing. For other expressions we need to force them only if - * PRETTY_PAREN is on, since otherwise the expression will output them - * itself. (We can't skip the parens.) 
- */ - bool need_paren = (PRETTY_PAREN(context) - || IsA(expr, FuncExpr) - ||IsA(expr, Aggref) - ||IsA(expr, WindowFunc)); - - if (need_paren) - appendStringInfoChar(context->buf, '('); - get_rule_expr(expr, context, true); - if (need_paren) - appendStringInfoChar(context->buf, ')'); - } - - return expr; -} - -/* - * Display a GroupingSet - */ -static void -get_rule_groupingset(GroupingSet *gset, List *targetlist, - bool omit_parens, deparse_context *context) -{ - ListCell *l; - StringInfo buf = context->buf; - bool omit_child_parens = true; - char *sep = ""; - - switch (gset->kind) - { - case GROUPING_SET_EMPTY: - appendStringInfoString(buf, "()"); - return; - - case GROUPING_SET_SIMPLE: - { - if (!omit_parens || list_length(gset->content) != 1) - appendStringInfoChar(buf, '('); - - foreach(l, gset->content) - { - Index ref = lfirst_int(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(ref, targetlist, - false, context); - sep = ", "; - } - - if (!omit_parens || list_length(gset->content) != 1) - appendStringInfoChar(buf, ')'); - } - return; - - case GROUPING_SET_ROLLUP: - appendStringInfoString(buf, "ROLLUP("); - break; - case GROUPING_SET_CUBE: - appendStringInfoString(buf, "CUBE("); - break; - case GROUPING_SET_SETS: - appendStringInfoString(buf, "GROUPING SETS ("); - omit_child_parens = false; - break; - } - - foreach(l, gset->content) - { - appendStringInfoString(buf, sep); - get_rule_groupingset(lfirst(l), targetlist, omit_child_parens, context); - sep = ", "; - } - - appendStringInfoChar(buf, ')'); -} - -/* - * Display an ORDER BY list. 
- */ -static void -get_rule_orderby(List *orderList, List *targetList, - bool force_colno, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - sep = ""; - foreach(l, orderList) - { - SortGroupClause *srt = (SortGroupClause *) lfirst(l); - Node *sortexpr; - Oid sortcoltype; - TypeCacheEntry *typentry; - - appendStringInfoString(buf, sep); - sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, - force_colno, context); - sortcoltype = exprType(sortexpr); - /* See whether operator is default < or > for datatype */ - typentry = lookup_type_cache(sortcoltype, - TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); - if (srt->sortop == typentry->lt_opr) - { - /* ASC is default, so emit nothing for it */ - if (srt->nulls_first) - appendStringInfoString(buf, " NULLS FIRST"); - } - else if (srt->sortop == typentry->gt_opr) - { - appendStringInfoString(buf, " DESC"); - /* DESC defaults to NULLS FIRST */ - if (!srt->nulls_first) - appendStringInfoString(buf, " NULLS LAST"); - } - else - { - appendStringInfo(buf, " USING %s", - generate_operator_name(srt->sortop, - sortcoltype, - sortcoltype)); - /* be specific to eliminate ambiguity */ - if (srt->nulls_first) - appendStringInfoString(buf, " NULLS FIRST"); - else - appendStringInfoString(buf, " NULLS LAST"); - } - sep = ", "; - } -} - -/* - * Display a WINDOW clause. - * - * Note that the windowClause list might contain only anonymous window - * specifications, in which case we should print nothing here. 
- */ -static void -get_rule_windowclause(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - sep = NULL; - foreach(l, query->windowClause) - { - WindowClause *wc = (WindowClause *) lfirst(l); - - if (wc->name == NULL) - continue; /* ignore anonymous windows */ - - if (sep == NULL) - appendContextKeyword(context, " WINDOW ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - else - appendStringInfoString(buf, sep); - - appendStringInfo(buf, "%s AS ", quote_identifier(wc->name)); - - get_rule_windowspec(wc, query->targetList, context); - - sep = ", "; - } -} - -/* - * Display a window definition - */ -static void -get_rule_windowspec(WindowClause *wc, List *targetList, - deparse_context *context) -{ - StringInfo buf = context->buf; - bool needspace = false; - const char *sep; - ListCell *l; - - appendStringInfoChar(buf, '('); - if (wc->refname) - { - appendStringInfoString(buf, quote_identifier(wc->refname)); - needspace = true; - } - /* partition clauses are always inherited, so only print if no refname */ - if (wc->partitionClause && !wc->refname) - { - if (needspace) - appendStringInfoChar(buf, ' '); - appendStringInfoString(buf, "PARTITION BY "); - sep = ""; - foreach(l, wc->partitionClause) - { - SortGroupClause *grp = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, - false, context); - sep = ", "; - } - needspace = true; - } - /* print ordering clause only if not inherited */ - if (wc->orderClause && !wc->copiedOrder) - { - if (needspace) - appendStringInfoChar(buf, ' '); - appendStringInfoString(buf, "ORDER BY "); - get_rule_orderby(wc->orderClause, targetList, false, context); - needspace = true; - } - /* framing clause is never inherited, so print unless it's default */ - if (wc->frameOptions & FRAMEOPTION_NONDEFAULT) - { - if (needspace) - appendStringInfoChar(buf, ' '); - if (wc->frameOptions & FRAMEOPTION_RANGE) - 
appendStringInfoString(buf, "RANGE "); - else if (wc->frameOptions & FRAMEOPTION_ROWS) - appendStringInfoString(buf, "ROWS "); - else if (wc->frameOptions & FRAMEOPTION_GROUPS) - appendStringInfoString(buf, "GROUPS "); - else - Assert(false); - if (wc->frameOptions & FRAMEOPTION_BETWEEN) - appendStringInfoString(buf, "BETWEEN "); - if (wc->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) - appendStringInfoString(buf, "UNBOUNDED PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_START_CURRENT_ROW) - appendStringInfoString(buf, "CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_START_OFFSET) - { - get_rule_expr(wc->startOffset, context, false); - if (wc->frameOptions & FRAMEOPTION_START_OFFSET_PRECEDING) - appendStringInfoString(buf, " PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_START_OFFSET_FOLLOWING) - appendStringInfoString(buf, " FOLLOWING "); - else - Assert(false); - } - else - Assert(false); - if (wc->frameOptions & FRAMEOPTION_BETWEEN) - { - appendStringInfoString(buf, "AND "); - if (wc->frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING) - appendStringInfoString(buf, "UNBOUNDED FOLLOWING "); - else if (wc->frameOptions & FRAMEOPTION_END_CURRENT_ROW) - appendStringInfoString(buf, "CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_END_OFFSET) - { - get_rule_expr(wc->endOffset, context, false); - if (wc->frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING) - appendStringInfoString(buf, " PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING) - appendStringInfoString(buf, " FOLLOWING "); - else - Assert(false); - } - else - Assert(false); - } - if (wc->frameOptions & FRAMEOPTION_EXCLUDE_CURRENT_ROW) - appendStringInfoString(buf, "EXCLUDE CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_GROUP) - appendStringInfoString(buf, "EXCLUDE GROUP "); - else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_TIES) - appendStringInfoString(buf, "EXCLUDE TIES "); - /* we will now have a trailing space; remove 
it */ - buf->len--; - } - appendStringInfoChar(buf, ')'); -} - -/* ---------- - * get_insert_query_def - Parse back an INSERT parsetree - * ---------- - */ -static void -get_insert_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *select_rte = NULL; - RangeTblEntry *values_rte = NULL; - RangeTblEntry *rte; - char *sep; - ListCell *l; - List *strippedexprs; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * If it's an INSERT ... SELECT or multi-row VALUES, there will be a - * single RTE for the SELECT or VALUES. Plain VALUES has neither. - */ - foreach(l, query->rtable) - { - rte = (RangeTblEntry *) lfirst(l); - - if (rte->rtekind == RTE_SUBQUERY) - { - if (select_rte) - elog(ERROR, "too many subquery RTEs in INSERT"); - select_rte = rte; - } - - if (rte->rtekind == RTE_VALUES) - { - if (values_rte) - elog(ERROR, "too many values RTEs in INSERT"); - values_rte = rte; - } - } - if (select_rte && values_rte) - elog(ERROR, "both subquery and values RTEs in INSERT"); - - /* - * Start the query with INSERT INTO relname - */ - rte = rt_fetch(query->resultRelation, query->rtable); - Assert(rte->rtekind == RTE_RELATION); - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - appendStringInfo(buf, "INSERT INTO %s ", - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - /* INSERT requires AS keyword for target alias */ - if (rte->alias != NULL) - appendStringInfo(buf, "AS %s ", - quote_identifier(get_rtable_name(query->resultRelation, context))); - - /* - * Add the insert-column-names list. Any indirection decoration needed on - * the column names can be inferred from the top targetlist. 
- */ - strippedexprs = NIL; - sep = ""; - if (query->targetList) - appendStringInfoChar(buf, '('); - foreach(l, query->targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - - if (tle->resjunk) - continue; /* ignore junk entries */ - - appendStringInfoString(buf, sep); - sep = ", "; - - /* - * Put out name of target column; look in the catalogs, not at - * tle->resname, since resname will fail to track RENAME. - */ - appendStringInfoString(buf, - quote_identifier(get_attname(rte->relid, - tle->resno, - false))); - - /* - * Print any indirection needed (subfields or subscripts), and strip - * off the top-level nodes representing the indirection assignments. - * Add the stripped expressions to strippedexprs. (If it's a - * single-VALUES statement, the stripped expressions are the VALUES to - * print below. Otherwise they're just Vars and not really - * interesting.) - */ - strippedexprs = lappend(strippedexprs, - processIndirection((Node *) tle->expr, - context)); - } - if (query->targetList) - appendStringInfoString(buf, ") "); - - if (query->override) - { - if (query->override == OVERRIDING_SYSTEM_VALUE) - appendStringInfoString(buf, "OVERRIDING SYSTEM VALUE "); - else if (query->override == OVERRIDING_USER_VALUE) - appendStringInfoString(buf, "OVERRIDING USER VALUE "); - } - - if (select_rte) - { - /* Add the SELECT */ - get_query_def(select_rte->subquery, buf, NIL, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - } - else if (values_rte) - { - /* Add the multi-VALUES expression lists */ - get_values_def(values_rte->values_lists, context); - } - else if (strippedexprs) - { - /* Add the single-VALUES expression list */ - appendContextKeyword(context, "VALUES (", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); - get_rule_expr((Node *) strippedexprs, context, false); - appendStringInfoChar(buf, ')'); - } - else - { - /* No expressions, so it must be DEFAULT VALUES */ - appendStringInfoString(buf, "DEFAULT VALUES"); - } - - /* Add ON 
CONFLICT if present */ - if (query->onConflict) - { - OnConflictExpr *confl = query->onConflict; - - appendStringInfoString(buf, " ON CONFLICT"); - - if (confl->arbiterElems) - { - /* Add the single-VALUES expression list */ - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) confl->arbiterElems, context, false); - appendStringInfoChar(buf, ')'); - - /* Add a WHERE clause (for partial indexes) if given */ - if (confl->arbiterWhere != NULL) - { - bool save_varprefix; - - /* - * Force non-prefixing of Vars, since parser assumes that they - * belong to target relation. WHERE clause does not use - * InferenceElem, so this is separately required. - */ - save_varprefix = context->varprefix; - context->varprefix = false; - - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(confl->arbiterWhere, context, false); - - context->varprefix = save_varprefix; - } - } - else if (OidIsValid(confl->constraint)) - { - char *constraint = get_constraint_name(confl->constraint); - int64 shardId = context->shardid; - - if (shardId > 0) - { - AppendShardIdToName(&constraint, shardId); - } - - if (!constraint) - elog(ERROR, "cache lookup failed for constraint %u", - confl->constraint); - appendStringInfo(buf, " ON CONSTRAINT %s", - quote_identifier(constraint)); - } - - if (confl->action == ONCONFLICT_NOTHING) - { - appendStringInfoString(buf, " DO NOTHING"); - } - else - { - appendStringInfoString(buf, " DO UPDATE SET "); - /* Deparse targetlist */ - get_update_query_targetlist_def(query, confl->onConflictSet, - context, rte); - - /* Add a WHERE clause if given */ - if (confl->onConflictWhere != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(confl->onConflictWhere, context, false); - } - } - } - - /* Add RETURNING if present */ - if (query->returningList) - { - appendContextKeyword(context, " RETURNING", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - 
get_target_list(query->returningList, context, NULL); - } -} - - -/* ---------- - * get_update_query_def - Parse back an UPDATE parsetree - * ---------- - */ -static void -get_update_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *rte; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * Start the query with UPDATE relname SET - */ - rte = rt_fetch(query->resultRelation, query->rtable); - - if (PRETTY_INDENT(context)) - { - appendStringInfoChar(buf, ' '); - context->indentLevel += PRETTYINDENT_STD; - } - - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "UPDATE %s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, fragmentTableName)); - - if(rte->eref != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - else - { - appendStringInfo(buf, "UPDATE %s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - - if (rte->alias != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - - appendStringInfoString(buf, " SET "); - - /* Deparse targetlist */ - get_update_query_targetlist_def(query, query->targetList, context, rte); - - /* Add the FROM clause if needed */ - get_from_clause(query, " FROM ", context); - - /* Add a WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add RETURNING if present */ - if (query->returningList) - { - 
appendContextKeyword(context, " RETURNING", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_target_list(query->returningList, context, NULL); - } -} - - -/* ---------- - * get_update_query_targetlist_def - Parse back an UPDATE targetlist - * ---------- - */ -static void -get_update_query_targetlist_def(Query *query, List *targetList, - deparse_context *context, RangeTblEntry *rte) -{ - StringInfo buf = context->buf; - ListCell *l; - ListCell *next_ma_cell; - int remaining_ma_columns; - const char *sep; - SubLink *cur_ma_sublink; - List *ma_sublinks; - - /* - * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks - * into a list. We expect them to appear, in ID order, in resjunk tlist - * entries. - */ - ma_sublinks = NIL; - if (query->hasSubLinks) /* else there can't be any */ - { - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - - if (tle->resjunk && IsA(tle->expr, SubLink)) - { - SubLink *sl = (SubLink *) tle->expr; - - if (sl->subLinkType == MULTIEXPR_SUBLINK) - { - ma_sublinks = lappend(ma_sublinks, sl); - Assert(sl->subLinkId == list_length(ma_sublinks)); - } - } - } - } - next_ma_cell = list_head(ma_sublinks); - cur_ma_sublink = NULL; - remaining_ma_columns = 0; - - /* Add the comma separated list of 'attname = value' */ - sep = ""; - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - Node *expr; - - if (tle->resjunk) - continue; /* ignore junk entries */ - - /* Emit separator (OK whether we're in multiassignment or not) */ - appendStringInfoString(buf, sep); - sep = ", "; - - /* - * Check to see if we're starting a multiassignment group: if so, - * output a left paren. - */ - if (next_ma_cell != NULL && cur_ma_sublink == NULL) - { - /* - * We must dig down into the expr to see if it's a PARAM_MULTIEXPR - * Param. 
That could be buried under FieldStores and - * SubscriptingRefs and CoerceToDomains (cf processIndirection()), - * and underneath those there could be an implicit type coercion. - * Because we would ignore implicit type coercions anyway, we - * don't need to be as careful as processIndirection() is about - * descending past implicit CoerceToDomains. - */ - expr = (Node *) tle->expr; - while (expr) - { - if (IsA(expr, FieldStore)) - { - FieldStore *fstore = (FieldStore *) expr; - - expr = (Node *) linitial(fstore->newvals); - } - else if (IsA(expr, SubscriptingRef)) - { - SubscriptingRef *sbsref = (SubscriptingRef *) expr; - - if (sbsref->refassgnexpr == NULL) - break; - expr = (Node *) sbsref->refassgnexpr; - } - else if (IsA(expr, CoerceToDomain)) - { - CoerceToDomain *cdomain = (CoerceToDomain *) expr; - - if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) - break; - expr = (Node *) cdomain->arg; - } - else - break; - } - expr = strip_implicit_coercions(expr); - - if (expr && IsA(expr, Param) && - ((Param *) expr)->paramkind == PARAM_MULTIEXPR) - { - cur_ma_sublink = (SubLink *) lfirst(next_ma_cell); - next_ma_cell = lnext(ma_sublinks, next_ma_cell); - remaining_ma_columns = count_nonjunk_tlist_entries( - ((Query *) cur_ma_sublink->subselect)->targetList); - Assert(((Param *) expr)->paramid == - ((cur_ma_sublink->subLinkId << 16) | 1)); - appendStringInfoChar(buf, '('); - } - } - - /* - * Put out name of target column; look in the catalogs, not at - * tle->resname, since resname will fail to track RENAME. - */ - appendStringInfoString(buf, - quote_identifier(get_attname(rte->relid, - tle->resno, - false))); - - /* - * Print any indirection needed (subfields or subscripts), and strip - * off the top-level nodes representing the indirection assignments. 
- */ - expr = processIndirection((Node *) tle->expr, context); - - /* - * If we're in a multiassignment, skip printing anything more, unless - * this is the last column; in which case, what we print should be the - * sublink, not the Param. - */ - if (cur_ma_sublink != NULL) - { - if (--remaining_ma_columns > 0) - continue; /* not the last column of multiassignment */ - appendStringInfoChar(buf, ')'); - expr = (Node *) cur_ma_sublink; - cur_ma_sublink = NULL; - } - - appendStringInfoString(buf, " = "); - - get_rule_expr(expr, context, false); - } -} - - -/* ---------- - * get_delete_query_def - Parse back a DELETE parsetree - * ---------- - */ -static void -get_delete_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *rte; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * Start the query with DELETE FROM relname - */ - rte = rt_fetch(query->resultRelation, query->rtable); - - if (PRETTY_INDENT(context)) - { - appendStringInfoChar(buf, ' '); - context->indentLevel += PRETTYINDENT_STD; - } - - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "DELETE FROM %s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, fragmentTableName)); - - if(rte->eref != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - else - { - appendStringInfo(buf, "DELETE FROM %s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - - if (rte->alias != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - - /* Add the USING clause if 
given */ - get_from_clause(query, " USING ", context); - - /* Add a WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add RETURNING if present */ - if (query->returningList) - { - appendContextKeyword(context, " RETURNING", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_target_list(query->returningList, context, NULL); - } -} - - -/* ---------- - * get_utility_query_def - Parse back a UTILITY parsetree - * ---------- - */ -static void -get_utility_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - - if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt)) - { - NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt; - - appendContextKeyword(context, "", - 0, PRETTYINDENT_STD, 1); - appendStringInfo(buf, "NOTIFY %s", - quote_identifier(stmt->conditionname)); - if (stmt->payload) - { - appendStringInfoString(buf, ", "); - simple_quote_literal(buf, stmt->payload); - } - } - else if (query->utilityStmt && IsA(query->utilityStmt, TruncateStmt)) - { - TruncateStmt *stmt = (TruncateStmt *) query->utilityStmt; - List *relationList = stmt->relations; - ListCell *relationCell = NULL; - - appendContextKeyword(context, "", - 0, PRETTYINDENT_STD, 1); - - appendStringInfo(buf, "TRUNCATE TABLE"); - - foreach(relationCell, relationList) - { - RangeVar *relationVar = (RangeVar *) lfirst(relationCell); - Oid relationId = RangeVarGetRelid(relationVar, NoLock, false); - char *relationName = generate_relation_or_shard_name(relationId, - context->distrelid, - context->shardid, NIL); - appendStringInfo(buf, " %s", relationName); - - if (lnext(relationList, relationCell) != NULL) - { - appendStringInfo(buf, ","); - } - } - - if (stmt->restart_seqs) - { - appendStringInfo(buf, " RESTART IDENTITY"); - } - - if (stmt->behavior == DROP_CASCADE) - { - appendStringInfo(buf, " 
CASCADE"); - } - } - else - { - /* Currently only NOTIFY utility commands can appear in rules */ - elog(ERROR, "unexpected utility statement type"); - } -} - -/* - * Display a Var appropriately. - * - * In some cases (currently only when recursing into an unnamed join) - * the Var's varlevelsup has to be interpreted with respect to a context - * above the current one; levelsup indicates the offset. - * - * If istoplevel is true, the Var is at the top level of a SELECT's - * targetlist, which means we need special treatment of whole-row Vars. - * Instead of the normal "tab.*", we'll print "tab.*::typename", which is a - * dirty hack to prevent "tab.*" from being expanded into multiple columns. - * (The parser will strip the useless coercion, so no inefficiency is added in - * dump and reload.) We used to print just "tab" in such cases, but that is - * ambiguous and will yield the wrong result if "tab" is also a plain column - * name in the query. - * - * Returns the attname of the Var, or NULL if the Var has no attname (because - * it is a whole-row Var or a subplan output reference). 
- */ -static char * -get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *rte; - AttrNumber attnum; - Index varno; - AttrNumber varattno; - int netlevelsup; - deparse_namespace *dpns; - deparse_columns *colinfo; - char *refname; - char *attname; - - /* Find appropriate nesting depth */ - netlevelsup = var->varlevelsup + levelsup; - if (netlevelsup >= list_length(context->namespaces)) - elog(ERROR, "bogus varlevelsup: %d offset %d", - var->varlevelsup, levelsup); - dpns = (deparse_namespace *) list_nth(context->namespaces, - netlevelsup); - - varno = var->varno; - varattno = var->varattno; - - - if (var->varnosyn > 0 && var->varnosyn <= list_length(dpns->rtable) && dpns->plan == NULL) { - rte = rt_fetch(var->varnosyn, dpns->rtable); - - /* - * if the rte var->varnosyn points to is not a regular table and it is a join - * then the correct relname will be found with var->varnosyn and var->varattnosyn - */ - if (rte->rtekind == RTE_JOIN && rte->relid == 0 && var->varnosyn != var->varno) { - varno = var->varnosyn; - varattno = var->varattnosyn; - } - } - - /* - * Try to find the relevant RTE in this rtable. In a plan tree, it's - * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig - * down into the subplans, or INDEX_VAR, which is resolved similarly. Also - * find the aliases previously assigned for this RTE. - */ - if (varno >= 1 && varno <= list_length(dpns->rtable)) - { - - /* - * We might have been asked to map child Vars to some parent relation. 
- */ - if (context->appendparents && dpns->appendrels) - { - - Index pvarno = varno; - AttrNumber pvarattno = varattno; - AppendRelInfo *appinfo = dpns->appendrels[pvarno]; - bool found = false; - - /* Only map up to inheritance parents, not UNION ALL appendrels */ - while (appinfo && - rt_fetch(appinfo->parent_relid, - dpns->rtable)->rtekind == RTE_RELATION) - { - found = false; - if (pvarattno > 0) /* system columns stay as-is */ - { - if (pvarattno > appinfo->num_child_cols) - break; /* safety check */ - pvarattno = appinfo->parent_colnos[pvarattno - 1]; - if (pvarattno == 0) - break; /* Var is local to child */ - } - - pvarno = appinfo->parent_relid; - found = true; - - /* If the parent is itself a child, continue up. */ - Assert(pvarno > 0 && pvarno <= list_length(dpns->rtable)); - appinfo = dpns->appendrels[pvarno]; - } - - /* - * If we found an ancestral rel, and that rel is included in - * appendparents, print that column not the original one. - */ - if (found && bms_is_member(pvarno, context->appendparents)) - { - varno = pvarno; - varattno = pvarattno; - } - } - - rte = rt_fetch(varno, dpns->rtable); - refname = (char *) list_nth(dpns->rtable_names, varno - 1); - colinfo = deparse_columns_fetch(varno, dpns); - attnum = varattno; - } - else - { - resolve_special_varno((Node *) var, context, get_special_variable, - NULL); - return NULL; - } - - /* - * The planner will sometimes emit Vars referencing resjunk elements of a - * subquery's target list (this is currently only possible if it chooses - * to generate a "physical tlist" for a SubqueryScan or CteScan node). - * Although we prefer to print subquery-referencing Vars using the - * subquery's alias, that's not possible for resjunk items since they have - * no alias. So in that case, drill down to the subplan and print the - * contents of the referenced tlist item. 
This works because in a plan - * tree, such Vars can only occur in a SubqueryScan or CteScan node, and - * we'll have set dpns->inner_plan to reference the child plan node. - */ - if ((rte->rtekind == RTE_SUBQUERY || rte->rtekind == RTE_CTE) && - attnum > list_length(rte->eref->colnames) && - dpns->inner_plan) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->inner_tlist, attnum); - if (!tle) - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_plan, &save_dpns); - - /* - * Force parentheses because our caller probably assumed a Var is a - * simple expression. - */ - if (!IsA(tle->expr, Var)) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) tle->expr, context, true); - if (!IsA(tle->expr, Var)) - appendStringInfoChar(buf, ')'); - - pop_child_plan(dpns, &save_dpns); - return NULL; - } - - /* - * If it's an unnamed join, look at the expansion of the alias variable. - * If it's a simple reference to one of the input vars, then recursively - * print the name of that var instead. When it's not a simple reference, - * we have to just print the unqualified join column name. (This can only - * happen with "dangerous" merged columns in a JOIN USING; we took pains - * previously to make the unqualified column name unique in such cases.) - * - * This wouldn't work in decompiling plan trees, because we don't store - * joinaliasvars lists after planning; but a plan tree should never - * contain a join alias variable. 
- */ - if (rte->rtekind == RTE_JOIN && rte->alias == NULL) - { - if (rte->joinaliasvars == NIL) - elog(ERROR, "cannot decompile join alias var in plan tree"); - if (attnum > 0) - { - Var *aliasvar; - - aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1); - /* we intentionally don't strip implicit coercions here */ - if (aliasvar && IsA(aliasvar, Var)) - { - return get_variable(aliasvar, var->varlevelsup + levelsup, - istoplevel, context); - } - } - - /* - * Unnamed join has no refname. (Note: since it's unnamed, there is - * no way the user could have referenced it to create a whole-row Var - * for it. So we don't have to cover that case below.) - */ - Assert(refname == NULL); - } - - if (attnum == InvalidAttrNumber) - attname = NULL; - else if (attnum > 0) - { - /* Get column name to use from the colinfo struct */ - if (attnum > colinfo->num_cols) - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); - attname = colinfo->colnames[attnum - 1]; - if (attname == NULL) /* dropped column? 
*/ - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); - } - else if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - /* System column on a Citus shard */ - attname = get_attname(rte->relid, attnum, false); - } - else - { - /* System column - name is fixed, get it from the catalog */ - attname = get_rte_attribute_name(rte, attnum); - } - - if (refname && (context->varprefix || attname == NULL)) - { - appendStringInfoString(buf, quote_identifier(refname)); - appendStringInfoChar(buf, '.'); - } - if (attname) - appendStringInfoString(buf, quote_identifier(attname)); - else - { - appendStringInfoChar(buf, '*'); - - if (istoplevel) - { - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - /* use rel.*::shard_name instead of rel.*::table_name */ - appendStringInfo(buf, "::%s", - generate_rte_shard_name(rte)); - } - else - { - appendStringInfo(buf, "::%s", - format_type_with_typemod(var->vartype, - var->vartypmod)); - } - } - } - - return attname; -} - -/* - * Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This - * routine is actually a callback for get_special_varno, which handles finding - * the correct TargetEntry. We get the expression contained in that - * TargetEntry and just need to deparse it, a job we can throw back on - * get_rule_expr. - */ -static void -get_special_variable(Node *node, deparse_context *context, void *callback_arg) -{ - StringInfo buf = context->buf; - - /* - * For a non-Var referent, force parentheses because our caller probably - * assumed a Var is a simple expression. - */ - if (!IsA(node, Var)) - appendStringInfoChar(buf, '('); - get_rule_expr(node, context, true); - if (!IsA(node, Var)) - appendStringInfoChar(buf, ')'); -} - -/* - * Chase through plan references to special varnos (OUTER_VAR, INNER_VAR, - * INDEX_VAR) until we find a real Var or some kind of non-Var node; then, - * invoke the callback provided. 
- */ -static void -resolve_special_varno(Node *node, deparse_context *context, rsv_callback callback, void *callback_arg) -{ - Var *var; - deparse_namespace *dpns; - - /* This function is recursive, so let's be paranoid. */ - check_stack_depth(); - - /* If it's not a Var, invoke the callback. */ - if (!IsA(node, Var)) - { - (*callback) (node, context, callback_arg); - return; - } - - /* Find appropriate nesting depth */ - var = (Var *) node; - dpns = (deparse_namespace *) list_nth(context->namespaces, - var->varlevelsup); - - /* - * It's a special RTE, so recurse. - */ - if (var->varno == OUTER_VAR && dpns->outer_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - Bitmapset *save_appendparents; - - tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); - - /* If we're descending to the first child of an Append or MergeAppend, - * update appendparents. This will affect deparsing of all Vars - * appearing within the eventually-resolved subexpression. 
- */ - save_appendparents = context->appendparents; - - if (IsA(dpns->plan, Append)) - context->appendparents = bms_union(context->appendparents, - ((Append *) dpns->plan)->apprelids); - else if (IsA(dpns->plan, MergeAppend)) - context->appendparents = bms_union(context->appendparents, - ((MergeAppend *) dpns->plan)->apprelids); - - push_child_plan(dpns, dpns->outer_plan, &save_dpns); - resolve_special_varno((Node *) tle->expr, context, - callback, callback_arg); - pop_child_plan(dpns, &save_dpns); - context->appendparents = save_appendparents; - return; - } - else if (var->varno == INNER_VAR && dpns->inner_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); - - push_child_plan(dpns, dpns->inner_plan, &save_dpns); - resolve_special_varno((Node *) tle->expr, context, callback, callback_arg); - pop_child_plan(dpns, &save_dpns); - return; - } - else if (var->varno == INDEX_VAR && dpns->index_tlist) - { - TargetEntry *tle; - - tle = get_tle_by_resno(dpns->index_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); - - resolve_special_varno((Node *) tle->expr, context, callback, callback_arg); - return; - } - else if (var->varno < 1 || var->varno > list_length(dpns->rtable)) - elog(ERROR, "bogus varno: %d", var->varno); - - /* Not special. Just invoke the callback. */ - (*callback) (node, context, callback_arg); -} - -/* - * Get the name of a field of an expression of composite type. The - * expression is usually a Var, but we handle other cases too. - * - * levelsup is an extra offset to interpret the Var's varlevelsup correctly. - * - * This is fairly straightforward when the expression has a named composite - * type; we need only look up the type in the catalogs. However, the type - * could also be RECORD. 
Since no actual table or view column is allowed to - * have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE - * or to a subquery output. We drill down to find the ultimate defining - * expression and attempt to infer the field name from it. We ereport if we - * can't determine the name. - * - * Similarly, a PARAM of type RECORD has to refer to some expression of - * a determinable composite type. - */ -static const char * -get_name_for_var_field(Var *var, int fieldno, - int levelsup, deparse_context *context) -{ - RangeTblEntry *rte; - AttrNumber attnum; - int netlevelsup; - deparse_namespace *dpns; - Index varno; - AttrNumber varattno; - TupleDesc tupleDesc; - Node *expr; - - /* - * If it's a RowExpr that was expanded from a whole-row Var, use the - * column names attached to it. - */ - if (IsA(var, RowExpr)) - { - RowExpr *r = (RowExpr *) var; - - if (fieldno > 0 && fieldno <= list_length(r->colnames)) - return strVal(list_nth(r->colnames, fieldno - 1)); - } - - /* - * If it's a Param of type RECORD, try to find what the Param refers to. - */ - if (IsA(var, Param)) - { - Param *param = (Param *) var; - ListCell *ancestor_cell; - - expr = find_param_referent(param, context, &dpns, &ancestor_cell); - if (expr) - { - /* Found a match, so recurse to decipher the field name */ - deparse_namespace save_dpns; - const char *result; - - push_ancestor_plan(dpns, ancestor_cell, &save_dpns); - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - pop_ancestor_plan(dpns, &save_dpns); - return result; - } - } - - /* - * If it's a Var of type RECORD, we have to find what the Var refers to; - * if not, we can use get_expr_result_tupdesc(). 
- */ - if (!IsA(var, Var) || - var->vartype != RECORDOID) - { - tupleDesc = get_expr_result_tupdesc((Node *) var, false); - /* Got the tupdesc, so we can extract the field name */ - Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); - return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname); - } - - /* Find appropriate nesting depth */ - netlevelsup = var->varlevelsup + levelsup; - if (netlevelsup >= list_length(context->namespaces)) - elog(ERROR, "bogus varlevelsup: %d offset %d", - var->varlevelsup, levelsup); - dpns = (deparse_namespace *) list_nth(context->namespaces, - netlevelsup); - - varno = var->varno; - varattno = var->varattno; - - if (var->varnosyn > 0 && var->varnosyn <= list_length(dpns->rtable) && dpns->plan == NULL) { - rte = rt_fetch(var->varnosyn, dpns->rtable); - - /* - * if the rte var->varnosyn points to is not a regular table and it is a join - * then the correct relname will be found with var->varnosyn and var->varattnosyn - */ - if (rte->rtekind == RTE_JOIN && rte->relid == 0 && var->varnosyn != var->varno) { - varno = var->varnosyn; - varattno = var->varattnosyn; - } - } - - /* - * Try to find the relevant RTE in this rtable. In a plan tree, it's - * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig - * down into the subplans, or INDEX_VAR, which is resolved similarly. 
- */ - if (varno >= 1 && varno <= list_length(dpns->rtable)) - { - rte = rt_fetch(varno, dpns->rtable); - attnum = varattno; - } - else if (varno == OUTER_VAR && dpns->outer_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - tle = get_tle_by_resno(dpns->outer_tlist, varattno); - if (!tle) - elog(ERROR, "bogus varattno for OUTER_VAR var: %d", varattno); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->outer_plan, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - else if (varno == INNER_VAR && dpns->inner_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - tle = get_tle_by_resno(dpns->inner_tlist, varattno); - if (!tle) - elog(ERROR, "bogus varattno for INNER_VAR var: %d", varattno); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_plan, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - else if (varno == INDEX_VAR && dpns->index_tlist) - { - TargetEntry *tle; - const char *result; - - tle = get_tle_by_resno(dpns->index_tlist, varattno); - if (!tle) - elog(ERROR, "bogus varattno for INDEX_VAR var: %d", varattno); - - Assert(netlevelsup == 0); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - return result; - } - else - { - elog(ERROR, "bogus varno: %d", varno); - return NULL; /* keep compiler quiet */ - } - - if (attnum == InvalidAttrNumber) - { - /* Var is whole-row reference to RTE, so select the right field */ - return get_rte_attribute_name(rte, fieldno); - } - - /* - * This part has essentially the same logic as the parser's - * expandRecordVariable() function, but we are dealing with a different - * representation of the input context, and we only need one field name - * not a TupleDesc. 
Also, we need special cases for finding subquery and - * CTE subplans when deparsing Plan trees. - */ - expr = (Node *) var; /* default if we can't drill down */ - - switch (rte->rtekind) - { - case RTE_RELATION: - case RTE_VALUES: - case RTE_NAMEDTUPLESTORE: - case RTE_RESULT: - - /* - * This case should not occur: a column of a table or values list - * shouldn't have type RECORD. Fall through and fail (most - * likely) at the bottom. - */ - break; - case RTE_SUBQUERY: - /* Subselect-in-FROM: examine sub-select's output expr */ - { - if (rte->subquery) - { - TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList, - attnum); - - if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", - rte->eref->aliasname, attnum); - expr = (Node *) ste->expr; - if (IsA(expr, Var)) - { - /* - * Recurse into the sub-select to see what its Var - * refers to. We have to build an additional level of - * namespace to keep in step with varlevelsup in the - * subselect. - */ - deparse_namespace mydpns; - const char *result; - - set_deparse_for_query(&mydpns, rte->subquery, - context->namespaces); - - context->namespaces = lcons(&mydpns, - context->namespaces); - - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - - context->namespaces = - list_delete_first(context->namespaces); - - return result; - } - /* else fall through to inspect the expression */ - } - else - { - /* - * We're deparsing a Plan tree so we don't have complete - * RTE entries (in particular, rte->subquery is NULL). But - * the only place we'd see a Var directly referencing a - * SUBQUERY RTE is in a SubqueryScan plan node, and we can - * look into the child plan's tlist instead. 
- */ - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - if (!dpns->inner_plan) - elog(ERROR, "failed to find plan for subquery %s", - rte->eref->aliasname); - tle = get_tle_by_resno(dpns->inner_tlist, attnum); - if (!tle) - elog(ERROR, "bogus varattno for subquery var: %d", - attnum); - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_plan, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - } - break; - case RTE_JOIN: - /* Join RTE --- recursively inspect the alias variable */ - if (rte->joinaliasvars == NIL) - elog(ERROR, "cannot decompile join alias var in plan tree"); - Assert(attnum > 0 && attnum <= list_length(rte->joinaliasvars)); - expr = (Node *) list_nth(rte->joinaliasvars, attnum - 1); - Assert(expr != NULL); - /* we intentionally don't strip implicit coercions here */ - if (IsA(expr, Var)) - return get_name_for_var_field((Var *) expr, fieldno, - var->varlevelsup + levelsup, - context); - /* else fall through to inspect the expression */ - break; - case RTE_FUNCTION: - case RTE_TABLEFUNC: - - /* - * We couldn't get here unless a function is declared with one of - * its result columns as RECORD, which is not allowed. - */ - break; - case RTE_CTE: - /* CTE reference: examine subquery's output expr */ - { - CommonTableExpr *cte = NULL; - Index ctelevelsup; - ListCell *lc; - - /* - * Try to find the referenced CTE using the namespace stack. 
- */ - ctelevelsup = rte->ctelevelsup + netlevelsup; - if (ctelevelsup >= list_length(context->namespaces)) - lc = NULL; - else - { - deparse_namespace *ctedpns; - - ctedpns = (deparse_namespace *) - list_nth(context->namespaces, ctelevelsup); - foreach(lc, ctedpns->ctes) - { - cte = (CommonTableExpr *) lfirst(lc); - if (strcmp(cte->ctename, rte->ctename) == 0) - break; - } - } - if (lc != NULL) - { - Query *ctequery = (Query *) cte->ctequery; - TargetEntry *ste = get_tle_by_resno(GetCTETargetList(cte), - attnum); - - if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", - rte->eref->aliasname, attnum); - expr = (Node *) ste->expr; - if (IsA(expr, Var)) - { - /* - * Recurse into the CTE to see what its Var refers to. - * We have to build an additional level of namespace - * to keep in step with varlevelsup in the CTE. - * Furthermore it could be an outer CTE, so we may - * have to delete some levels of namespace. - */ - List *save_nslist = context->namespaces; - List *new_nslist; - deparse_namespace mydpns; - const char *result; - - set_deparse_for_query(&mydpns, ctequery, - context->namespaces); - - new_nslist = list_copy_tail(context->namespaces, - ctelevelsup); - context->namespaces = lcons(&mydpns, new_nslist); - - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - - context->namespaces = save_nslist; - - return result; - } - /* else fall through to inspect the expression */ - } - else - { - /* - * We're deparsing a Plan tree so we don't have a CTE - * list. But the only place we'd see a Var directly - * referencing a CTE RTE is in a CteScan plan node, and we - * can look into the subplan's tlist instead. 
- */ - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - if (!dpns->inner_plan) - elog(ERROR, "failed to find plan for CTE %s", - rte->eref->aliasname); - tle = get_tle_by_resno(dpns->inner_tlist, attnum); - if (!tle) - elog(ERROR, "bogus varattno for subquery var: %d", - attnum); - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_plan, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - } - break; - } - - /* - * We now have an expression we can't expand any more, so see if - * get_expr_result_tupdesc() can do anything with it. - */ - tupleDesc = get_expr_result_tupdesc(expr, false); - /* Got the tupdesc, so we can extract the field name */ - Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); - return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname); -} - -/* - * Try to find the referenced expression for a PARAM_EXEC Param that might - * reference a parameter supplied by an upper NestLoop or SubPlan plan node. - * - * If successful, return the expression and set *dpns_p and *ancestor_cell_p - * appropriately for calling push_ancestor_plan(). If no referent can be - * found, return NULL. - */ -static Node * -find_param_referent(Param *param, deparse_context *context, - deparse_namespace **dpns_p, ListCell **ancestor_cell_p) -{ - /* Initialize output parameters to prevent compiler warnings */ - *dpns_p = NULL; - *ancestor_cell_p = NULL; - - /* - * If it's a PARAM_EXEC parameter, look for a matching NestLoopParam or - * SubPlan argument. This will necessarily be in some ancestor of the - * current expression's Plan. 
- */ - if (param->paramkind == PARAM_EXEC) - { - deparse_namespace *dpns; - Plan *child_plan; - bool in_same_plan_level; - ListCell *lc; - - dpns = (deparse_namespace *) linitial(context->namespaces); - child_plan = dpns->plan; - in_same_plan_level = true; - - foreach(lc, dpns->ancestors) - { - Node *ancestor = (Node *) lfirst(lc); - ListCell *lc2; - - /* - * NestLoops transmit params to their inner child only; also, once - * we've crawled up out of a subplan, this couldn't possibly be - * the right match. - */ - if (IsA(ancestor, NestLoop) && - child_plan == innerPlan(ancestor) && - in_same_plan_level) - { - NestLoop *nl = (NestLoop *) ancestor; - - foreach(lc2, nl->nestParams) - { - NestLoopParam *nlp = (NestLoopParam *) lfirst(lc2); - - if (nlp->paramno == param->paramid) - { - /* Found a match, so return it */ - *dpns_p = dpns; - *ancestor_cell_p = lc; - return (Node *) nlp->paramval; - } - } - } - - /* - * Check to see if we're crawling up from a subplan. - */ - if(IsA(ancestor, SubPlan)) - { - SubPlan *subplan = (SubPlan *) ancestor; - ListCell *lc3; - ListCell *lc4; - - /* Matched subplan, so check its arguments */ - forboth(lc3, subplan->parParam, lc4, subplan->args) - { - int paramid = lfirst_int(lc3); - Node *arg = (Node *) lfirst(lc4); - - if (paramid == param->paramid) - { - /* - * Found a match, so return it. But, since Vars in - * the arg are to be evaluated in the surrounding - * context, we have to point to the next ancestor item - * that is *not* a SubPlan. - */ - ListCell *rest; - - for_each_cell(rest, dpns->ancestors, - lnext(dpns->ancestors, lc)) - { - Node *ancestor2 = (Node *) lfirst(rest); - - if (!IsA(ancestor2, SubPlan)) - { - *dpns_p = dpns; - *ancestor_cell_p = rest; - return arg; - } - } - elog(ERROR, "SubPlan cannot be outermost ancestor"); - } - } - - /* We have emerged from a subplan. 
*/ - in_same_plan_level = false; - - /* SubPlan isn't a kind of Plan, so skip the rest */ - continue; - } - - /* - * Check to see if we're emerging from an initplan of the current - * ancestor plan. Initplans never have any parParams, so no need - * to search that list, but we need to know if we should reset - * in_same_plan_level. - */ - foreach(lc2, ((Plan *) ancestor)->initPlan) - { - SubPlan *subplan = castNode(SubPlan, lfirst(lc2)); - - if (child_plan != (Plan *) list_nth(dpns->subplans, - subplan->plan_id - 1)) - continue; - - /* No parameters to be had here. */ - Assert(subplan->parParam == NIL); - - /* We have emerged from an initplan. */ - in_same_plan_level = false; - break; - } - - /* No luck, crawl up to next ancestor */ - child_plan = (Plan *) ancestor; - } - } - - /* No referent found */ - return NULL; -} - -/* - * Display a Param appropriately. - */ -static void -get_parameter(Param *param, deparse_context *context) -{ - Node *expr; - deparse_namespace *dpns; - ListCell *ancestor_cell; - - /* - * If it's a PARAM_EXEC parameter, try to locate the expression from which - * the parameter was computed. Note that failing to find a referent isn't - * an error, since the Param might well be a subplan output rather than an - * input. - */ - expr = find_param_referent(param, context, &dpns, &ancestor_cell); - if (expr) - { - /* Found a match, so print it */ - deparse_namespace save_dpns; - bool save_varprefix; - bool need_paren; - - /* Switch attention to the ancestor plan node */ - push_ancestor_plan(dpns, ancestor_cell, &save_dpns); - - /* - * Force prefixing of Vars, since they won't belong to the relation - * being scanned in the original plan node. - */ - save_varprefix = context->varprefix; - context->varprefix = true; - - /* - * A Param's expansion is typically a Var, Aggref, or upper-level - * Param, which wouldn't need extra parentheses. Otherwise, insert - * parens to ensure the expression looks atomic. 
- */ - need_paren = !(IsA(expr, Var) || - IsA(expr, Aggref) || - IsA(expr, Param)); - if (need_paren) - appendStringInfoChar(context->buf, '('); - - get_rule_expr(expr, context, false); - - if (need_paren) - appendStringInfoChar(context->buf, ')'); - - context->varprefix = save_varprefix; - - pop_ancestor_plan(dpns, &save_dpns); - - return; - } - - /* - * Not PARAM_EXEC, or couldn't find referent: for base types just print $N. - * For composite types, add cast to the parameter to ease remote node detect - * the type. - */ - if (param->paramtype >= FirstNormalObjectId) - { - char *typeName = format_type_with_typemod(param->paramtype, param->paramtypmod); - - appendStringInfo(context->buf, "$%d::%s", param->paramid, typeName); - } - else - { - appendStringInfo(context->buf, "$%d", param->paramid); - } -} - -/* - * get_simple_binary_op_name - * - * helper function for isSimpleNode - * will return single char binary operator name, or NULL if it's not - */ -static const char * -get_simple_binary_op_name(OpExpr *expr) -{ - List *args = expr->args; - - if (list_length(args) == 2) - { - /* binary operator */ - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - const char *op; - - op = generate_operator_name(expr->opno, exprType(arg1), exprType(arg2)); - if (strlen(op) == 1) - return op; - } - return NULL; -} - - -/* - * isSimpleNode - check if given node is simple (doesn't need parenthesizing) - * - * true : simple in the context of parent node's type - * false : not simple - */ -static bool -isSimpleNode(Node *node, Node *parentNode, int prettyFlags) -{ - if (!node) - return false; - - switch (nodeTag(node)) - { - case T_Var: - case T_Const: - case T_Param: - case T_CoerceToDomainValue: - case T_SetToDefault: - case T_CurrentOfExpr: - /* single words: always simple */ - return true; - - case T_SubscriptingRef: - case T_ArrayExpr: - case T_RowExpr: - case T_CoalesceExpr: - case T_MinMaxExpr: - case T_SQLValueFunction: - case T_XmlExpr: - case 
T_NextValueExpr: - case T_NullIfExpr: - case T_Aggref: - case T_WindowFunc: - case T_FuncExpr: - /* function-like: name(..) or name[..] */ - return true; - - /* CASE keywords act as parentheses */ - case T_CaseExpr: - return true; - - case T_FieldSelect: - - /* - * appears simple since . has top precedence, unless parent is - * T_FieldSelect itself! - */ - return (IsA(parentNode, FieldSelect) ? false : true); - - case T_FieldStore: - - /* - * treat like FieldSelect (probably doesn't matter) - */ - return (IsA(parentNode, FieldStore) ? false : true); - - case T_CoerceToDomain: - /* maybe simple, check args */ - return isSimpleNode((Node *) ((CoerceToDomain *) node)->arg, - node, prettyFlags); - case T_RelabelType: - return isSimpleNode((Node *) ((RelabelType *) node)->arg, - node, prettyFlags); - case T_CoerceViaIO: - return isSimpleNode((Node *) ((CoerceViaIO *) node)->arg, - node, prettyFlags); - case T_ArrayCoerceExpr: - return isSimpleNode((Node *) ((ArrayCoerceExpr *) node)->arg, - node, prettyFlags); - case T_ConvertRowtypeExpr: - return isSimpleNode((Node *) ((ConvertRowtypeExpr *) node)->arg, - node, prettyFlags); - - case T_OpExpr: - { - /* depends on parent node type; needs further checking */ - if (prettyFlags & PRETTYFLAG_PAREN && IsA(parentNode, OpExpr)) - { - const char *op; - const char *parentOp; - bool is_lopriop; - bool is_hipriop; - bool is_lopriparent; - bool is_hipriparent; - - op = get_simple_binary_op_name((OpExpr *) node); - if (!op) - return false; - - /* We know only the basic operators + - and * / % */ - is_lopriop = (strchr("+-", *op) != NULL); - is_hipriop = (strchr("*/%", *op) != NULL); - if (!(is_lopriop || is_hipriop)) - return false; - - parentOp = get_simple_binary_op_name((OpExpr *) parentNode); - if (!parentOp) - return false; - - is_lopriparent = (strchr("+-", *parentOp) != NULL); - is_hipriparent = (strchr("*/%", *parentOp) != NULL); - if (!(is_lopriparent || is_hipriparent)) - return false; - - if (is_hipriop && is_lopriparent) 
- return true; /* op binds tighter than parent */ - - if (is_lopriop && is_hipriparent) - return false; - - /* - * Operators are same priority --- can skip parens only if - * we have (a - b) - c, not a - (b - c). - */ - if (node == (Node *) linitial(((OpExpr *) parentNode)->args)) - return true; - - return false; - } - /* else do the same stuff as for T_SubLink et al. */ - } - /* FALLTHROUGH */ - - case T_SubLink: - case T_NullTest: - case T_BooleanTest: - case T_DistinctExpr: - switch (nodeTag(parentNode)) - { - case T_FuncExpr: - { - /* special handling for casts */ - CoercionForm type = ((FuncExpr *) parentNode)->funcformat; - - if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) - return false; - return true; /* own parentheses */ - } - case T_BoolExpr: /* lower precedence */ - case T_SubscriptingRef: /* other separators */ - case T_ArrayExpr: /* other separators */ - case T_RowExpr: /* other separators */ - case T_CoalesceExpr: /* own parentheses */ - case T_MinMaxExpr: /* own parentheses */ - case T_XmlExpr: /* own parentheses */ - case T_NullIfExpr: /* other separators */ - case T_Aggref: /* own parentheses */ - case T_WindowFunc: /* own parentheses */ - case T_CaseExpr: /* other separators */ - return true; - default: - return false; - } - - case T_BoolExpr: - switch (nodeTag(parentNode)) - { - case T_BoolExpr: - if (prettyFlags & PRETTYFLAG_PAREN) - { - BoolExprType type; - BoolExprType parentType; - - type = ((BoolExpr *) node)->boolop; - parentType = ((BoolExpr *) parentNode)->boolop; - switch (type) - { - case NOT_EXPR: - case AND_EXPR: - if (parentType == AND_EXPR || parentType == OR_EXPR) - return true; - break; - case OR_EXPR: - if (parentType == OR_EXPR) - return true; - break; - } - } - return false; - case T_FuncExpr: - { - /* special handling for casts */ - CoercionForm type = ((FuncExpr *) parentNode)->funcformat; - - if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) - return false; - return true; /* own 
parentheses */ - } - case T_SubscriptingRef: /* other separators */ - case T_ArrayExpr: /* other separators */ - case T_RowExpr: /* other separators */ - case T_CoalesceExpr: /* own parentheses */ - case T_MinMaxExpr: /* own parentheses */ - case T_XmlExpr: /* own parentheses */ - case T_NullIfExpr: /* other separators */ - case T_Aggref: /* own parentheses */ - case T_WindowFunc: /* own parentheses */ - case T_CaseExpr: /* other separators */ - return true; - default: - return false; - } - - default: - break; - } - /* those we don't know: in dubio complexo */ - return false; -} - - -/* - * appendContextKeyword - append a keyword to buffer - * - * If prettyPrint is enabled, perform a line break, and adjust indentation. - * Otherwise, just append the keyword. - */ -static void -appendContextKeyword(deparse_context *context, const char *str, - int indentBefore, int indentAfter, int indentPlus) -{ - StringInfo buf = context->buf; - - if (PRETTY_INDENT(context)) - { - int indentAmount; - - context->indentLevel += indentBefore; - - /* remove any trailing spaces currently in the buffer ... */ - removeStringInfoSpaces(buf); - /* ... then add a newline and some spaces */ - appendStringInfoChar(buf, '\n'); - - if (context->indentLevel < PRETTYINDENT_LIMIT) - indentAmount = Max(context->indentLevel, 0) + indentPlus; - else - { - /* - * If we're indented more than PRETTYINDENT_LIMIT characters, try - * to conserve horizontal space by reducing the per-level - * indentation. For best results the scale factor here should - * divide all the indent amounts that get added to indentLevel - * (PRETTYINDENT_STD, etc). It's important that the indentation - * not grow unboundedly, else deeply-nested trees use O(N^2) - * whitespace; so we also wrap modulo PRETTYINDENT_LIMIT. 
- */ - indentAmount = PRETTYINDENT_LIMIT + - (context->indentLevel - PRETTYINDENT_LIMIT) / - (PRETTYINDENT_STD / 2); - indentAmount %= PRETTYINDENT_LIMIT; - /* scale/wrap logic affects indentLevel, but not indentPlus */ - indentAmount += indentPlus; - } - appendStringInfoSpaces(buf, indentAmount); - - appendStringInfoString(buf, str); - - context->indentLevel += indentAfter; - if (context->indentLevel < 0) - context->indentLevel = 0; - } - else - appendStringInfoString(buf, str); -} - -/* - * removeStringInfoSpaces - delete trailing spaces from a buffer. - * - * Possibly this should move to stringinfo.c at some point. - */ -static void -removeStringInfoSpaces(StringInfo str) -{ - while (str->len > 0 && str->data[str->len - 1] == ' ') - str->data[--(str->len)] = '\0'; -} - - -/* - * get_rule_expr_paren - deparse expr using get_rule_expr, - * embracing the string with parentheses if necessary for prettyPrint. - * - * Never embrace if prettyFlags=0, because it's done in the calling node. - * - * Any node that does *not* embrace its argument node by sql syntax (with - * parentheses, non-operator keywords like CASE/WHEN/ON, or comma etc) should - * use get_rule_expr_paren instead of get_rule_expr so parentheses can be - * added. - */ -static void -get_rule_expr_paren(Node *node, deparse_context *context, - bool showimplicit, Node *parentNode) -{ - bool need_paren; - - need_paren = PRETTY_PAREN(context) && - !isSimpleNode(node, parentNode, context->prettyFlags); - - if (need_paren) - appendStringInfoChar(context->buf, '('); - - get_rule_expr(node, context, showimplicit); - - if (need_paren) - appendStringInfoChar(context->buf, ')'); -} - - -/* ---------- - * get_rule_expr - Parse back an expression - * - * Note: showimplicit determines whether we display any implicit cast that - * is present at the top of the expression tree. It is a passed argument, - * not a field of the context struct, because we change the value as we - * recurse down into the expression. 
In general we suppress implicit casts - * when the result type is known with certainty (eg, the arguments of an - * OR must be boolean). We display implicit casts for arguments of functions - * and operators, since this is needed to be certain that the same function - * or operator will be chosen when the expression is re-parsed. - * ---------- - */ -static void -get_rule_expr(Node *node, deparse_context *context, - bool showimplicit) -{ - StringInfo buf = context->buf; - - if (node == NULL) - return; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - /* - * Each level of get_rule_expr must emit an indivisible term - * (parenthesized if necessary) to ensure result is reparsed into the same - * expression tree. The only exception is that when the input is a List, - * we emit the component items comma-separated with no surrounding - * decoration; this is convenient for most callers. - */ - switch (nodeTag(node)) - { - case T_Var: - (void) get_variable((Var *) node, 0, false, context); - break; - - case T_Const: - get_const_expr((Const *) node, context, 0); - break; - - case T_Param: - get_parameter((Param *) node, context); - break; - - case T_Aggref: - get_agg_expr((Aggref *) node, context, (Aggref *) node); - break; - - case T_GroupingFunc: - { - GroupingFunc *gexpr = (GroupingFunc *) node; - - appendStringInfoString(buf, "GROUPING("); - get_rule_expr((Node *) gexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_WindowFunc: - get_windowfunc_expr((WindowFunc *) node, context); - break; - - case T_SubscriptingRef: - { - SubscriptingRef *sbsref = (SubscriptingRef *) node; - bool need_parens; - - /* - * If the argument is a CaseTestExpr, we must be inside a - * FieldStore, ie, we are assigning to an element of an array - * within a composite column. 
Since we already punted on - * displaying the FieldStore's target information, just punt - * here too, and display only the assignment source - * expression. - */ - if (IsA(sbsref->refexpr, CaseTestExpr)) - { - Assert(sbsref->refassgnexpr); - get_rule_expr((Node *) sbsref->refassgnexpr, - context, showimplicit); - break; - } - - /* - * Parenthesize the argument unless it's a simple Var or a - * FieldSelect. (In particular, if it's another - * SubscriptingRef, we *must* parenthesize to avoid - * confusion.) - */ - need_parens = !IsA(sbsref->refexpr, Var) && - !IsA(sbsref->refexpr, FieldSelect); - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) sbsref->refexpr, context, showimplicit); - if (need_parens) - appendStringInfoChar(buf, ')'); - - /* - * If there's a refassgnexpr, we want to print the node in the - * format "container[subscripts] := refassgnexpr". This is - * not legal SQL, so decompilation of INSERT or UPDATE - * statements should always use processIndirection as part of - * the statement-level syntax. We should only see this when - * EXPLAIN tries to print the targetlist of a plan resulting - * from such a statement. - */ - if (sbsref->refassgnexpr) - { - Node *refassgnexpr; - - /* - * Use processIndirection to print this node's subscripts - * as well as any additional field selections or - * subscripting in immediate descendants. It returns the - * RHS expr that is actually being "assigned". 
- */ - refassgnexpr = processIndirection(node, context); - appendStringInfoString(buf, " := "); - get_rule_expr(refassgnexpr, context, showimplicit); - } - else - { - /* Just an ordinary container fetch, so print subscripts */ - printSubscripts(sbsref, context); - } - } - break; - - case T_FuncExpr: - get_func_expr((FuncExpr *) node, context, showimplicit); - break; - - case T_NamedArgExpr: - { - NamedArgExpr *na = (NamedArgExpr *) node; - - appendStringInfo(buf, "%s => ", quote_identifier(na->name)); - get_rule_expr((Node *) na->arg, context, showimplicit); - } - break; - - case T_OpExpr: - get_oper_expr((OpExpr *) node, context); - break; - - case T_DistinctExpr: - { - DistinctExpr *expr = (DistinctExpr *) node; - List *args = expr->args; - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg1, context, true, node); - appendStringInfoString(buf, " IS DISTINCT FROM "); - get_rule_expr_paren(arg2, context, true, node); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_NullIfExpr: - { - NullIfExpr *nullifexpr = (NullIfExpr *) node; - - appendStringInfoString(buf, "NULLIF("); - get_rule_expr((Node *) nullifexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_ScalarArrayOpExpr: - { - ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node; - List *args = expr->args; - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg1, context, true, node); - appendStringInfo(buf, " %s %s (", - generate_operator_name(expr->opno, - exprType(arg1), - get_base_element_type(exprType(arg2))), - expr->useOr ? "ANY" : "ALL"); - get_rule_expr_paren(arg2, context, true, node); - - /* - * There's inherent ambiguity in "x op ANY/ALL (y)" when y is - * a bare sub-SELECT. 
Since we're here, the sub-SELECT must - * be meant as a scalar sub-SELECT yielding an array value to - * be used in ScalarArrayOpExpr; but the grammar will - * preferentially interpret such a construct as an ANY/ALL - * SubLink. To prevent misparsing the output that way, insert - * a dummy coercion (which will be stripped by parse analysis, - * so no inefficiency is added in dump and reload). This is - * indeed most likely what the user wrote to get the construct - * accepted in the first place. - */ - if (IsA(arg2, SubLink) && - ((SubLink *) arg2)->subLinkType == EXPR_SUBLINK) - appendStringInfo(buf, "::%s", - format_type_with_typemod(exprType(arg2), - exprTypmod(arg2))); - appendStringInfoChar(buf, ')'); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_BoolExpr: - { - BoolExpr *expr = (BoolExpr *) node; - Node *first_arg = linitial(expr->args); - ListCell *arg = list_second_cell(expr->args); - - switch (expr->boolop) - { - case AND_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(first_arg, context, - false, node); - while (arg) - { - appendStringInfoString(buf, " AND "); - get_rule_expr_paren((Node *) lfirst(arg), context, - false, node); - arg = lnext(expr->args, arg); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - case OR_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(first_arg, context, - false, node); - while (arg) - { - appendStringInfoString(buf, " OR "); - get_rule_expr_paren((Node *) lfirst(arg), context, - false, node); - arg = lnext(expr->args, arg); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - case NOT_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - appendStringInfoString(buf, "NOT "); - get_rule_expr_paren(first_arg, context, - false, node); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - default: - 
elog(ERROR, "unrecognized boolop: %d", - (int) expr->boolop); - } - } - break; - - case T_SubLink: - get_sublink_expr((SubLink *) node, context); - break; - - case T_SubPlan: - { - SubPlan *subplan = (SubPlan *) node; - - /* - * We cannot see an already-planned subplan in rule deparsing, - * only while EXPLAINing a query plan. We don't try to - * reconstruct the original SQL, just reference the subplan - * that appears elsewhere in EXPLAIN's result. - */ - if (subplan->useHashTable) - appendStringInfo(buf, "(hashed %s)", subplan->plan_name); - else - appendStringInfo(buf, "(%s)", subplan->plan_name); - } - break; - - case T_AlternativeSubPlan: - { - AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; - ListCell *lc; - - /* As above, this can only happen during EXPLAIN */ - appendStringInfoString(buf, "(alternatives: "); - foreach(lc, asplan->subplans) - { - SubPlan *splan = lfirst_node(SubPlan, lc); - - if (splan->useHashTable) - appendStringInfo(buf, "hashed %s", splan->plan_name); - else - appendStringInfoString(buf, splan->plan_name); - if (lnext(asplan->subplans, lc)) - appendStringInfoString(buf, " or "); - } - appendStringInfoChar(buf, ')'); - } - break; - - case T_FieldSelect: - { - FieldSelect *fselect = (FieldSelect *) node; - Node *arg = (Node *) fselect->arg; - int fno = fselect->fieldnum; - const char *fieldname; - bool need_parens; - - /* - * Parenthesize the argument unless it's an SubscriptingRef or - * another FieldSelect. Note in particular that it would be - * WRONG to not parenthesize a Var argument; simplicity is not - * the issue here, having the right number of names is. - */ - need_parens = !IsA(arg, SubscriptingRef) && - !IsA(arg, FieldSelect); - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr(arg, context, true); - if (need_parens) - appendStringInfoChar(buf, ')'); - - /* - * Get and print the field name. 
- */ - fieldname = get_name_for_var_field((Var *) arg, fno, - 0, context); - appendStringInfo(buf, ".%s", quote_identifier(fieldname)); - } - break; - - case T_FieldStore: - { - FieldStore *fstore = (FieldStore *) node; - bool need_parens; - - /* - * There is no good way to represent a FieldStore as real SQL, - * so decompilation of INSERT or UPDATE statements should - * always use processIndirection as part of the - * statement-level syntax. We should only get here when - * EXPLAIN tries to print the targetlist of a plan resulting - * from such a statement. The plan case is even harder than - * ordinary rules would be, because the planner tries to - * collapse multiple assignments to the same field or subfield - * into one FieldStore; so we can see a list of target fields - * not just one, and the arguments could be FieldStores - * themselves. We don't bother to try to print the target - * field names; we just print the source arguments, with a - * ROW() around them if there's more than one. This isn't - * terribly complete, but it's probably good enough for - * EXPLAIN's purposes; especially since anything more would be - * either hopelessly confusing or an even poorer - * representation of what the plan is actually doing. 
- */ - need_parens = (list_length(fstore->newvals) != 1); - if (need_parens) - appendStringInfoString(buf, "ROW("); - get_rule_expr((Node *) fstore->newvals, context, showimplicit); - if (need_parens) - appendStringInfoChar(buf, ')'); - } - break; - - case T_RelabelType: - { - RelabelType *relabel = (RelabelType *) node; - Node *arg = (Node *) relabel->arg; - - if (relabel->relabelformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - relabel->resulttype, - relabel->resulttypmod, - node); - } - } - break; - - case T_CoerceViaIO: - { - CoerceViaIO *iocoerce = (CoerceViaIO *) node; - Node *arg = (Node *) iocoerce->arg; - - if (iocoerce->coerceformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - iocoerce->resulttype, - -1, - node); - } - } - break; - - case T_ArrayCoerceExpr: - { - ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; - Node *arg = (Node *) acoerce->arg; - - if (acoerce->coerceformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - acoerce->resulttype, - acoerce->resulttypmod, - node); - } - } - break; - - case T_ConvertRowtypeExpr: - { - ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node; - Node *arg = (Node *) convert->arg; - - if (convert->convertformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - convert->resulttype, -1, - node); - } - } - break; - - case T_CollateExpr: - { - CollateExpr *collate = (CollateExpr *) node; - Node *arg = (Node *) collate->arg; - - if (!PRETTY_PAREN(context)) - 
appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg, context, showimplicit, node); - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(collate->collOid)); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_CaseExpr: - { - CaseExpr *caseexpr = (CaseExpr *) node; - ListCell *temp; - - appendContextKeyword(context, "CASE", - 0, PRETTYINDENT_VAR, 0); - if (caseexpr->arg) - { - appendStringInfoChar(buf, ' '); - get_rule_expr((Node *) caseexpr->arg, context, true); - } - foreach(temp, caseexpr->args) - { - CaseWhen *when = (CaseWhen *) lfirst(temp); - Node *w = (Node *) when->expr; - - if (caseexpr->arg) - { - /* - * The parser should have produced WHEN clauses of the - * form "CaseTestExpr = RHS", possibly with an - * implicit coercion inserted above the CaseTestExpr. - * For accurate decompilation of rules it's essential - * that we show just the RHS. However in an - * expression that's been through the optimizer, the - * WHEN clause could be almost anything (since the - * equality operator could have been expanded into an - * inline function). If we don't recognize the form - * of the WHEN clause, just punt and display it as-is. 
- */ - if (IsA(w, OpExpr)) - { - List *args = ((OpExpr *) w)->args; - - if (list_length(args) == 2 && - IsA(strip_implicit_coercions(linitial(args)), - CaseTestExpr)) - w = (Node *) lsecond(args); - } - } - - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "WHEN ", - 0, 0, 0); - get_rule_expr(w, context, false); - appendStringInfoString(buf, " THEN "); - get_rule_expr((Node *) when->result, context, true); - } - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "ELSE ", - 0, 0, 0); - get_rule_expr((Node *) caseexpr->defresult, context, true); - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "END", - -PRETTYINDENT_VAR, 0, 0); - } - break; - - case T_CaseTestExpr: - { - /* - * Normally we should never get here, since for expressions - * that can contain this node type we attempt to avoid - * recursing to it. But in an optimized expression we might - * be unable to avoid that (see comments for CaseExpr). If we - * do see one, print it as CASE_TEST_EXPR. - */ - appendStringInfoString(buf, "CASE_TEST_EXPR"); - } - break; - - case T_ArrayExpr: - { - ArrayExpr *arrayexpr = (ArrayExpr *) node; - - appendStringInfoString(buf, "ARRAY["); - get_rule_expr((Node *) arrayexpr->elements, context, true); - appendStringInfoChar(buf, ']'); - - /* - * If the array isn't empty, we assume its elements are - * coerced to the desired type. If it's empty, though, we - * need an explicit coercion to the array type. - */ - if (arrayexpr->elements == NIL) - appendStringInfo(buf, "::%s", - format_type_with_typemod(arrayexpr->array_typeid, -1)); - } - break; - - case T_RowExpr: - { - RowExpr *rowexpr = (RowExpr *) node; - TupleDesc tupdesc = NULL; - ListCell *arg; - int i; - char *sep; - - /* - * If it's a named type and not RECORD, we may have to skip - * dropped columns and/or claim there are NULLs for added - * columns. 
- */ - if (rowexpr->row_typeid != RECORDOID) - { - tupdesc = lookup_rowtype_tupdesc(rowexpr->row_typeid, -1); - Assert(list_length(rowexpr->args) <= tupdesc->natts); - } - - /* - * SQL99 allows "ROW" to be omitted when there is more than - * one column, but for simplicity we always print it. - */ - appendStringInfoString(buf, "ROW("); - sep = ""; - i = 0; - foreach(arg, rowexpr->args) - { - Node *e = (Node *) lfirst(arg); - - if (tupdesc == NULL || - !TupleDescAttr(tupdesc, i)->attisdropped) - { - appendStringInfoString(buf, sep); - /* Whole-row Vars need special treatment here */ - get_rule_expr_toplevel(e, context, true); - sep = ", "; - } - i++; - } - if (tupdesc != NULL) - { - while (i < tupdesc->natts) - { - if (!TupleDescAttr(tupdesc, i)->attisdropped) - { - appendStringInfoString(buf, sep); - appendStringInfoString(buf, "NULL"); - sep = ", "; - } - i++; - } - - ReleaseTupleDesc(tupdesc); - } - appendStringInfoChar(buf, ')'); - if (rowexpr->row_format == COERCE_EXPLICIT_CAST) - appendStringInfo(buf, "::%s", - format_type_with_typemod(rowexpr->row_typeid, -1)); - } - break; - - case T_RowCompareExpr: - { - RowCompareExpr *rcexpr = (RowCompareExpr *) node; - ListCell *arg; - char *sep; - - /* - * SQL99 allows "ROW" to be omitted when there is more than - * one column, but for simplicity we always print it. - */ - appendStringInfoString(buf, "(ROW("); - sep = ""; - foreach(arg, rcexpr->largs) - { - Node *e = (Node *) lfirst(arg); - - appendStringInfoString(buf, sep); - get_rule_expr(e, context, true); - sep = ", "; - } - - /* - * We assume that the name of the first-column operator will - * do for all the rest too. This is definitely open to - * failure, eg if some but not all operators were renamed - * since the construct was parsed, but there seems no way to - * be perfect. 
- */ - appendStringInfo(buf, ") %s ROW(", - generate_operator_name(linitial_oid(rcexpr->opnos), - exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs)))); - sep = ""; - foreach(arg, rcexpr->rargs) - { - Node *e = (Node *) lfirst(arg); - - appendStringInfoString(buf, sep); - get_rule_expr(e, context, true); - sep = ", "; - } - appendStringInfoString(buf, "))"); - } - break; - - case T_CoalesceExpr: - { - CoalesceExpr *coalesceexpr = (CoalesceExpr *) node; - - appendStringInfoString(buf, "COALESCE("); - get_rule_expr((Node *) coalesceexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_MinMaxExpr: - { - MinMaxExpr *minmaxexpr = (MinMaxExpr *) node; - - switch (minmaxexpr->op) - { - case IS_GREATEST: - appendStringInfoString(buf, "GREATEST("); - break; - case IS_LEAST: - appendStringInfoString(buf, "LEAST("); - break; - } - get_rule_expr((Node *) minmaxexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_SQLValueFunction: - { - SQLValueFunction *svf = (SQLValueFunction *) node; - - /* - * Note: this code knows that typmod for time, timestamp, and - * timestamptz just prints as integer. 
- */ - switch (svf->op) - { - case SVFOP_CURRENT_DATE: - appendStringInfoString(buf, "CURRENT_DATE"); - break; - case SVFOP_CURRENT_TIME: - appendStringInfoString(buf, "CURRENT_TIME"); - break; - case SVFOP_CURRENT_TIME_N: - appendStringInfo(buf, "CURRENT_TIME(%d)", svf->typmod); - break; - case SVFOP_CURRENT_TIMESTAMP: - appendStringInfoString(buf, "CURRENT_TIMESTAMP"); - break; - case SVFOP_CURRENT_TIMESTAMP_N: - appendStringInfo(buf, "CURRENT_TIMESTAMP(%d)", - svf->typmod); - break; - case SVFOP_LOCALTIME: - appendStringInfoString(buf, "LOCALTIME"); - break; - case SVFOP_LOCALTIME_N: - appendStringInfo(buf, "LOCALTIME(%d)", svf->typmod); - break; - case SVFOP_LOCALTIMESTAMP: - appendStringInfoString(buf, "LOCALTIMESTAMP"); - break; - case SVFOP_LOCALTIMESTAMP_N: - appendStringInfo(buf, "LOCALTIMESTAMP(%d)", - svf->typmod); - break; - case SVFOP_CURRENT_ROLE: - appendStringInfoString(buf, "CURRENT_ROLE"); - break; - case SVFOP_CURRENT_USER: - appendStringInfoString(buf, "CURRENT_USER"); - break; - case SVFOP_USER: - appendStringInfoString(buf, "USER"); - break; - case SVFOP_SESSION_USER: - appendStringInfoString(buf, "SESSION_USER"); - break; - case SVFOP_CURRENT_CATALOG: - appendStringInfoString(buf, "CURRENT_CATALOG"); - break; - case SVFOP_CURRENT_SCHEMA: - appendStringInfoString(buf, "CURRENT_SCHEMA"); - break; - } - } - break; - - case T_XmlExpr: - { - XmlExpr *xexpr = (XmlExpr *) node; - bool needcomma = false; - ListCell *arg; - ListCell *narg; - Const *con; - - switch (xexpr->op) - { - case IS_XMLCONCAT: - appendStringInfoString(buf, "XMLCONCAT("); - break; - case IS_XMLELEMENT: - appendStringInfoString(buf, "XMLELEMENT("); - break; - case IS_XMLFOREST: - appendStringInfoString(buf, "XMLFOREST("); - break; - case IS_XMLPARSE: - appendStringInfoString(buf, "XMLPARSE("); - break; - case IS_XMLPI: - appendStringInfoString(buf, "XMLPI("); - break; - case IS_XMLROOT: - appendStringInfoString(buf, "XMLROOT("); - break; - case IS_XMLSERIALIZE: - 
appendStringInfoString(buf, "XMLSERIALIZE("); - break; - case IS_DOCUMENT: - break; - } - if (xexpr->op == IS_XMLPARSE || xexpr->op == IS_XMLSERIALIZE) - { - if (xexpr->xmloption == XMLOPTION_DOCUMENT) - appendStringInfoString(buf, "DOCUMENT "); - else - appendStringInfoString(buf, "CONTENT "); - } - if (xexpr->name) - { - appendStringInfo(buf, "NAME %s", - quote_identifier(map_xml_name_to_sql_identifier(xexpr->name))); - needcomma = true; - } - if (xexpr->named_args) - { - if (xexpr->op != IS_XMLFOREST) - { - if (needcomma) - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, "XMLATTRIBUTES("); - needcomma = false; - } - forboth(arg, xexpr->named_args, narg, xexpr->arg_names) - { - Node *e = (Node *) lfirst(arg); - char *argname = strVal(lfirst(narg)); - - if (needcomma) - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) e, context, true); - appendStringInfo(buf, " AS %s", - quote_identifier(map_xml_name_to_sql_identifier(argname))); - needcomma = true; - } - if (xexpr->op != IS_XMLFOREST) - appendStringInfoChar(buf, ')'); - } - if (xexpr->args) - { - if (needcomma) - appendStringInfoString(buf, ", "); - switch (xexpr->op) - { - case IS_XMLCONCAT: - case IS_XMLELEMENT: - case IS_XMLFOREST: - case IS_XMLPI: - case IS_XMLSERIALIZE: - /* no extra decoration needed */ - get_rule_expr((Node *) xexpr->args, context, true); - break; - case IS_XMLPARSE: - Assert(list_length(xexpr->args) == 2); - - get_rule_expr((Node *) linitial(xexpr->args), - context, true); - - con = lsecond_node(Const, xexpr->args); - Assert(!con->constisnull); - if (DatumGetBool(con->constvalue)) - appendStringInfoString(buf, - " PRESERVE WHITESPACE"); - else - appendStringInfoString(buf, - " STRIP WHITESPACE"); - break; - case IS_XMLROOT: - Assert(list_length(xexpr->args) == 3); - - get_rule_expr((Node *) linitial(xexpr->args), - context, true); - - appendStringInfoString(buf, ", VERSION "); - con = (Const *) lsecond(xexpr->args); - if (IsA(con, Const) && - con->constisnull) 
- appendStringInfoString(buf, "NO VALUE"); - else - get_rule_expr((Node *) con, context, false); - - con = lthird_node(Const, xexpr->args); - if (con->constisnull) - /* suppress STANDALONE NO VALUE */ ; - else - { - switch (DatumGetInt32(con->constvalue)) - { - case XML_STANDALONE_YES: - appendStringInfoString(buf, - ", STANDALONE YES"); - break; - case XML_STANDALONE_NO: - appendStringInfoString(buf, - ", STANDALONE NO"); - break; - case XML_STANDALONE_NO_VALUE: - appendStringInfoString(buf, - ", STANDALONE NO VALUE"); - break; - default: - break; - } - } - break; - case IS_DOCUMENT: - get_rule_expr_paren((Node *) xexpr->args, context, false, node); - break; - } - - } - if (xexpr->op == IS_XMLSERIALIZE) - appendStringInfo(buf, " AS %s", - format_type_with_typemod(xexpr->type, - xexpr->typmod)); - if (xexpr->op == IS_DOCUMENT) - appendStringInfoString(buf, " IS DOCUMENT"); - else - appendStringInfoChar(buf, ')'); - } - break; - - case T_NullTest: - { - NullTest *ntest = (NullTest *) node; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren((Node *) ntest->arg, context, true, node); - - /* - * For scalar inputs, we prefer to print as IS [NOT] NULL, - * which is shorter and traditional. If it's a rowtype input - * but we're applying a scalar test, must print IS [NOT] - * DISTINCT FROM NULL to be semantically correct. 
- */ - if (ntest->argisrow || - !type_is_rowtype(exprType((Node *) ntest->arg))) - { - switch (ntest->nulltesttype) - { - case IS_NULL: - appendStringInfoString(buf, " IS NULL"); - break; - case IS_NOT_NULL: - appendStringInfoString(buf, " IS NOT NULL"); - break; - default: - elog(ERROR, "unrecognized nulltesttype: %d", - (int) ntest->nulltesttype); - } - } - else - { - switch (ntest->nulltesttype) - { - case IS_NULL: - appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL"); - break; - case IS_NOT_NULL: - appendStringInfoString(buf, " IS DISTINCT FROM NULL"); - break; - default: - elog(ERROR, "unrecognized nulltesttype: %d", - (int) ntest->nulltesttype); - } - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_BooleanTest: - { - BooleanTest *btest = (BooleanTest *) node; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren((Node *) btest->arg, context, false, node); - switch (btest->booltesttype) - { - case IS_TRUE: - appendStringInfoString(buf, " IS TRUE"); - break; - case IS_NOT_TRUE: - appendStringInfoString(buf, " IS NOT TRUE"); - break; - case IS_FALSE: - appendStringInfoString(buf, " IS FALSE"); - break; - case IS_NOT_FALSE: - appendStringInfoString(buf, " IS NOT FALSE"); - break; - case IS_UNKNOWN: - appendStringInfoString(buf, " IS UNKNOWN"); - break; - case IS_NOT_UNKNOWN: - appendStringInfoString(buf, " IS NOT UNKNOWN"); - break; - default: - elog(ERROR, "unrecognized booltesttype: %d", - (int) btest->booltesttype); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_CoerceToDomain: - { - CoerceToDomain *ctest = (CoerceToDomain *) node; - Node *arg = (Node *) ctest->arg; - - if (ctest->coercionformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr(arg, context, false); - } - else - { - get_coercion_expr(arg, context, - ctest->resulttype, - ctest->resulttypmod, - node); - } - } - break; - 
- case T_CoerceToDomainValue: - appendStringInfoString(buf, "VALUE"); - break; - - case T_SetToDefault: - appendStringInfoString(buf, "DEFAULT"); - break; - - case T_CurrentOfExpr: - { - CurrentOfExpr *cexpr = (CurrentOfExpr *) node; - - if (cexpr->cursor_name) - appendStringInfo(buf, "CURRENT OF %s", - quote_identifier(cexpr->cursor_name)); - else - appendStringInfo(buf, "CURRENT OF $%d", - cexpr->cursor_param); - } - break; - - case T_NextValueExpr: - { - NextValueExpr *nvexpr = (NextValueExpr *) node; - - /* - * This isn't exactly nextval(), but that seems close enough - * for EXPLAIN's purposes. - */ - appendStringInfoString(buf, "nextval("); - simple_quote_literal(buf, - generate_relation_name(nvexpr->seqid, - NIL)); - appendStringInfoChar(buf, ')'); - } - break; - - case T_InferenceElem: - { - InferenceElem *iexpr = (InferenceElem *) node; - bool save_varprefix; - bool need_parens; - - /* - * InferenceElem can only refer to target relation, so a - * prefix is not useful, and indeed would cause parse errors. - */ - save_varprefix = context->varprefix; - context->varprefix = false; - - /* - * Parenthesize the element unless it's a simple Var or a bare - * function call. Follows pg_get_indexdef_worker(). 
- */ - need_parens = !IsA(iexpr->expr, Var); - if (IsA(iexpr->expr, FuncExpr) && - ((FuncExpr *) iexpr->expr)->funcformat == - COERCE_EXPLICIT_CALL) - need_parens = false; - - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) iexpr->expr, - context, false); - if (need_parens) - appendStringInfoChar(buf, ')'); - - context->varprefix = save_varprefix; - - if (iexpr->infercollid) - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(iexpr->infercollid)); - - /* Add the operator class name, if not default */ - if (iexpr->inferopclass) - { - Oid inferopclass = iexpr->inferopclass; - Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass); - - get_opclass_name(inferopclass, inferopcinputtype, buf); - } - } - break; - - case T_PartitionBoundSpec: - { - PartitionBoundSpec *spec = (PartitionBoundSpec *) node; - ListCell *cell; - char *sep; - - if (spec->is_default) - { - appendStringInfoString(buf, "DEFAULT"); - break; - } - - switch (spec->strategy) - { - case PARTITION_STRATEGY_HASH: - Assert(spec->modulus > 0 && spec->remainder >= 0); - Assert(spec->modulus > spec->remainder); - - appendStringInfoString(buf, "FOR VALUES"); - appendStringInfo(buf, " WITH (modulus %d, remainder %d)", - spec->modulus, spec->remainder); - break; - - case PARTITION_STRATEGY_LIST: - Assert(spec->listdatums != NIL); - - appendStringInfoString(buf, "FOR VALUES IN ("); - sep = ""; - foreach(cell, spec->listdatums) - { - Const *val = castNode(Const, lfirst(cell)); - - appendStringInfoString(buf, sep); - get_const_expr(val, context, -1); - sep = ", "; - } - - appendStringInfoChar(buf, ')'); - break; - - case PARTITION_STRATEGY_RANGE: - Assert(spec->lowerdatums != NIL && - spec->upperdatums != NIL && - list_length(spec->lowerdatums) == - list_length(spec->upperdatums)); - - appendStringInfo(buf, "FOR VALUES FROM %s TO %s", - get_range_partbound_string(spec->lowerdatums), - get_range_partbound_string(spec->upperdatums)); - break; - - default: - 
elog(ERROR, "unrecognized partition strategy: %d", - (int) spec->strategy); - break; - } - } - break; - - case T_List: - { - char *sep; - ListCell *l; - - sep = ""; - foreach(l, (List *) node) - { - appendStringInfoString(buf, sep); - get_rule_expr((Node *) lfirst(l), context, showimplicit); - sep = ", "; - } - } - break; - - case T_TableFunc: - get_tablefunc((TableFunc *) node, context, showimplicit); - break; - - case T_CallStmt: - get_func_expr(((CallStmt *) node)->funcexpr, context, showimplicit); - break; - - default: - elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); - break; - } -} - -/* - * get_rule_expr_toplevel - Parse back a toplevel expression - * - * Same as get_rule_expr(), except that if the expr is just a Var, we pass - * istoplevel = true not false to get_variable(). This causes whole-row Vars - * to get printed with decoration that will prevent expansion of "*". - * We need to use this in contexts such as ROW() and VALUES(), where the - * parser would expand "foo.*" appearing at top level. (In principle we'd - * use this in get_target_list() too, but that has additional worries about - * whether to print AS, so it needs to invoke get_variable() directly anyway.) - */ -static void -get_rule_expr_toplevel(Node *node, deparse_context *context, - bool showimplicit) -{ - if (node && IsA(node, Var)) - (void) get_variable((Var *) node, 0, true, context); - else - get_rule_expr(node, context, showimplicit); -} - -/* - * get_rule_expr_funccall - Parse back a function-call expression - * - * Same as get_rule_expr(), except that we guarantee that the output will - * look like a function call, or like one of the things the grammar treats as - * equivalent to a function call (see the func_expr_windowless production). - * This is needed in places where the grammar uses func_expr_windowless and - * you can't substitute a parenthesized a_expr. 
If what we have isn't going - * to look like a function call, wrap it in a dummy CAST() expression, which - * will satisfy the grammar --- and, indeed, is likely what the user wrote to - * produce such a thing. - */ -static void -get_rule_expr_funccall(Node *node, deparse_context *context, - bool showimplicit) -{ - if (looks_like_function(node)) - get_rule_expr(node, context, showimplicit); - else - { - StringInfo buf = context->buf; - - appendStringInfoString(buf, "CAST("); - /* no point in showing any top-level implicit cast */ - get_rule_expr(node, context, false); - appendStringInfo(buf, " AS %s)", - format_type_with_typemod(exprType(node), - exprTypmod(node))); - } -} - -/* - * Helper function to identify node types that satisfy func_expr_windowless. - * If in doubt, "false" is always a safe answer. - */ -static bool -looks_like_function(Node *node) -{ - if (node == NULL) - return false; /* probably shouldn't happen */ - switch (nodeTag(node)) - { - case T_FuncExpr: - /* OK, unless it's going to deparse as a cast */ - return (((FuncExpr *) node)->funcformat == COERCE_EXPLICIT_CALL); - case T_NullIfExpr: - case T_CoalesceExpr: - case T_MinMaxExpr: - case T_SQLValueFunction: - case T_XmlExpr: - /* these are all accepted by func_expr_common_subexpr */ - return true; - default: - break; - } - return false; -} - - -/* - * get_oper_expr - Parse back an OpExpr node - */ -static void -get_oper_expr(OpExpr *expr, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid opno = expr->opno; - List *args = expr->args; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - if (list_length(args) == 2) - { - /* binary operator */ - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - get_rule_expr_paren(arg1, context, true, (Node *) expr); - appendStringInfo(buf, " %s ", - generate_operator_name(opno, - exprType(arg1), - exprType(arg2))); - get_rule_expr_paren(arg2, context, true, (Node *) expr); - } - else - { - /* unary 
operator --- but which side? */ - Node *arg = (Node *) linitial(args); - HeapTuple tp; - Form_pg_operator optup; - - tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for operator %u", opno); - optup = (Form_pg_operator) GETSTRUCT(tp); - switch (optup->oprkind) - { - case 'l': - appendStringInfo(buf, "%s ", - generate_operator_name(opno, - InvalidOid, - exprType(arg))); - get_rule_expr_paren(arg, context, true, (Node *) expr); - break; - case 'r': - get_rule_expr_paren(arg, context, true, (Node *) expr); - appendStringInfo(buf, " %s", - generate_operator_name(opno, - exprType(arg), - InvalidOid)); - break; - default: - elog(ERROR, "bogus oprkind: %d", optup->oprkind); - } - ReleaseSysCache(tp); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); -} - -/* - * get_func_expr - Parse back a FuncExpr node - */ -static void -get_func_expr(FuncExpr *expr, deparse_context *context, - bool showimplicit) -{ - StringInfo buf = context->buf; - Oid funcoid = expr->funcid; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - List *argnames; - bool use_variadic; - ListCell *l; - - /* - * If the function call came from an implicit coercion, then just show the - * first argument --- unless caller wants to see implicit coercions. - */ - if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit) - { - get_rule_expr_paren((Node *) linitial(expr->args), context, - false, (Node *) expr); - return; - } - - /* - * If the function call came from a cast, then show the first argument - * plus an explicit cast operation. 
- */ - if (expr->funcformat == COERCE_EXPLICIT_CAST || - expr->funcformat == COERCE_IMPLICIT_CAST) - { - Node *arg = linitial(expr->args); - Oid rettype = expr->funcresulttype; - int32 coercedTypmod; - - /* Get the typmod if this is a length-coercion function */ - (void) exprIsLengthCoercion((Node *) expr, &coercedTypmod); - - get_coercion_expr(arg, context, - rettype, coercedTypmod, - (Node *) expr); - - return; - } - - /* - * Normal function: display as proname(args). First we need to extract - * the argument datatypes. - */ - if (list_length(expr->args) > FUNC_MAX_ARGS) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg("too many arguments"))); - nargs = 0; - argnames = NIL; - foreach(l, expr->args) - { - Node *arg = (Node *) lfirst(l); - - if (IsA(arg, NamedArgExpr)) - argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); - argtypes[nargs] = exprType(arg); - nargs++; - } - - appendStringInfo(buf, "%s(", - generate_function_name(funcoid, nargs, - argnames, argtypes, - expr->funcvariadic, - &use_variadic, - context->special_exprkind)); - nargs = 0; - foreach(l, expr->args) - { - if (nargs++ > 0) - appendStringInfoString(buf, ", "); - if (use_variadic && lnext(expr->args, l) == NULL) - appendStringInfoString(buf, "VARIADIC "); - get_rule_expr((Node *) lfirst(l), context, true); - } - appendStringInfoChar(buf, ')'); -} - -/* - * get_agg_expr - Parse back an Aggref node - */ -static void -get_agg_expr(Aggref *aggref, deparse_context *context, - Aggref *original_aggref) -{ - StringInfo buf = context->buf; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - bool use_variadic; - - /* - * For a combining aggregate, we look up and deparse the corresponding - * partial aggregate instead. This is necessary because our input - * argument list has been replaced; the new argument list always has just - * one element, which will point to a partial Aggref that supplies us with - * transition states to combine. 
- */ - if (DO_AGGSPLIT_COMBINE(aggref->aggsplit)) - { - TargetEntry *tle; - - - Assert(list_length(aggref->args) == 1); - tle = linitial_node(TargetEntry, aggref->args); - resolve_special_varno((Node *) tle->expr, context, - get_agg_combine_expr, original_aggref); - return; - } - - /* - * Mark as PARTIAL, if appropriate. We look to the original aggref so as - * to avoid printing this when recursing from the code just above. - */ - if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit)) - appendStringInfoString(buf, "PARTIAL "); - - /* Extract the argument types as seen by the parser */ - nargs = get_aggregate_argtypes(aggref, argtypes); - - /* Print the aggregate name, schema-qualified if needed */ - appendStringInfo(buf, "%s(%s", - generate_function_name(aggref->aggfnoid, nargs, - NIL, argtypes, - aggref->aggvariadic, - &use_variadic, - context->special_exprkind), - (aggref->aggdistinct != NIL) ? "DISTINCT " : ""); - - if (AGGKIND_IS_ORDERED_SET(aggref->aggkind)) - { - /* - * Ordered-set aggregates do not use "*" syntax. Also, we needn't - * worry about inserting VARIADIC. So we can just dump the direct - * args as-is. 
- */ - Assert(!aggref->aggvariadic); - get_rule_expr((Node *) aggref->aggdirectargs, context, true); - Assert(aggref->aggorder != NIL); - appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY "); - get_rule_orderby(aggref->aggorder, aggref->args, false, context); - } - else - { - /* aggstar can be set only in zero-argument aggregates */ - if (aggref->aggstar) - appendStringInfoChar(buf, '*'); - else - { - ListCell *l; - int i; - - i = 0; - foreach(l, aggref->args) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - Node *arg = (Node *) tle->expr; - - Assert(!IsA(arg, NamedArgExpr)); - if (tle->resjunk) - continue; - if (i++ > 0) - appendStringInfoString(buf, ", "); - if (use_variadic && i == nargs) - appendStringInfoString(buf, "VARIADIC "); - get_rule_expr(arg, context, true); - } - } - - if (aggref->aggorder != NIL) - { - appendStringInfoString(buf, " ORDER BY "); - get_rule_orderby(aggref->aggorder, aggref->args, false, context); - } - } - - if (aggref->aggfilter != NULL) - { - appendStringInfoString(buf, ") FILTER (WHERE "); - get_rule_expr((Node *) aggref->aggfilter, context, false); - } - - appendStringInfoChar(buf, ')'); -} - -/* - * This is a helper function for get_agg_expr(). It's used when we deparse - * a combining Aggref; resolve_special_varno locates the corresponding partial - * Aggref and then calls this. 
- */ -static void -get_agg_combine_expr(Node *node, deparse_context *context, void *callback_arg) -{ - Aggref *aggref; - Aggref *original_aggref = callback_arg; - - if (!IsA(node, Aggref)) - elog(ERROR, "combining Aggref does not point to an Aggref"); - - aggref = (Aggref *) node; - get_agg_expr(aggref, context, original_aggref); -} - -/* - * get_windowfunc_expr - Parse back a WindowFunc node - */ -static void -get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - List *argnames; - ListCell *l; - - if (list_length(wfunc->args) > FUNC_MAX_ARGS) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg("too many arguments"))); - nargs = 0; - argnames = NIL; - foreach(l, wfunc->args) - { - Node *arg = (Node *) lfirst(l); - - if (IsA(arg, NamedArgExpr)) - argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); - argtypes[nargs] = exprType(arg); - nargs++; - } - - appendStringInfo(buf, "%s(", - generate_function_name(wfunc->winfnoid, nargs, - argnames, argtypes, - false, NULL, - context->special_exprkind)); - /* winstar can be set only in zero-argument aggregates */ - if (wfunc->winstar) - appendStringInfoChar(buf, '*'); - else - get_rule_expr((Node *) wfunc->args, context, true); - - if (wfunc->aggfilter != NULL) - { - appendStringInfoString(buf, ") FILTER (WHERE "); - get_rule_expr((Node *) wfunc->aggfilter, context, false); - } - - appendStringInfoString(buf, ") OVER "); - - foreach(l, context->windowClause) - { - WindowClause *wc = (WindowClause *) lfirst(l); - - if (wc->winref == wfunc->winref) - { - if (wc->name) - appendStringInfoString(buf, quote_identifier(wc->name)); - else - get_rule_windowspec(wc, context->windowTList, context); - break; - } - } - if (l == NULL) - { - if (context->windowClause) - elog(ERROR, "could not find window clause for winref %u", - wfunc->winref); - - /* - * In EXPLAIN, we don't have window context information available, so - * we 
have to settle for this: - */ - appendStringInfoString(buf, "(?)"); - } -} - -/* ---------- - * get_coercion_expr - * - * Make a string representation of a value coerced to a specific type - * ---------- - */ -static void -get_coercion_expr(Node *arg, deparse_context *context, - Oid resulttype, int32 resulttypmod, - Node *parentNode) -{ - StringInfo buf = context->buf; - - /* - * Since parse_coerce.c doesn't immediately collapse application of - * length-coercion functions to constants, what we'll typically see in - * such cases is a Const with typmod -1 and a length-coercion function - * right above it. Avoid generating redundant output. However, beware of - * suppressing casts when the user actually wrote something like - * 'foo'::text::char(3). - * - * Note: it might seem that we are missing the possibility of needing to - * print a COLLATE clause for such a Const. However, a Const could only - * have nondefault collation in a post-constant-folding tree, in which the - * length coercion would have been folded too. See also the special - * handling of CollateExpr in coerce_to_target_type(): any collation - * marking will be above the coercion node, not below it. - */ - if (arg && IsA(arg, Const) && - ((Const *) arg)->consttype == resulttype && - ((Const *) arg)->consttypmod == -1) - { - /* Show the constant without normal ::typename decoration */ - get_const_expr((Const *) arg, context, -1); - } - else - { - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg, context, false, parentNode); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - appendStringInfo(buf, "::%s", - format_type_with_typemod(resulttype, resulttypmod)); -} - -/* ---------- - * get_const_expr - * - * Make a string representation of a Const - * - * showtype can be -1 to never show "::typename" decoration, or +1 to always - * show it, or 0 to show it only if the constant wouldn't be assumed to be - * the right type by default. 
- * - * If the Const's collation isn't default for its type, show that too. - * We mustn't do this when showtype is -1 (since that means the caller will - * print "::typename", and we can't put a COLLATE clause in between). It's - * caller's responsibility that collation isn't missed in such cases. - * ---------- - */ -static void -get_const_expr(Const *constval, deparse_context *context, int showtype) -{ - StringInfo buf = context->buf; - Oid typoutput; - bool typIsVarlena; - char *extval; - bool needlabel = false; - - if (constval->constisnull) - { - /* - * Always label the type of a NULL constant to prevent misdecisions - * about type when reparsing. - */ - appendStringInfoString(buf, "NULL"); - if (showtype >= 0) - { - appendStringInfo(buf, "::%s", - format_type_with_typemod(constval->consttype, - constval->consttypmod)); - get_const_collation(constval, context); - } - return; - } - - getTypeOutputInfo(constval->consttype, - &typoutput, &typIsVarlena); - - extval = OidOutputFunctionCall(typoutput, constval->constvalue); - - switch (constval->consttype) - { - case INT4OID: - - /* - * INT4 can be printed without any decoration, unless it is - * negative; in that case print it as '-nnn'::integer to ensure - * that the output will re-parse as a constant, not as a constant - * plus operator. In most cases we could get away with printing - * (-nnn) instead, because of the way that gram.y handles negative - * literals; but that doesn't work for INT_MIN, and it doesn't - * seem that much prettier anyway. - */ - if (extval[0] != '-') - appendStringInfoString(buf, extval); - else - { - appendStringInfo(buf, "'%s'", extval); - needlabel = true; /* we must attach a cast */ - } - break; - - case NUMERICOID: - - /* - * NUMERIC can be printed without quotes if it looks like a float - * constant (not an integer, and not Infinity or NaN) and doesn't - * have a leading sign (for the same reason as for INT4). 
- */ - if (isdigit((unsigned char) extval[0]) && - strcspn(extval, "eE.") != strlen(extval)) - { - appendStringInfoString(buf, extval); - } - else - { - appendStringInfo(buf, "'%s'", extval); - needlabel = true; /* we must attach a cast */ - } - break; - - case BITOID: - case VARBITOID: - appendStringInfo(buf, "B'%s'", extval); - break; - - case BOOLOID: - if (strcmp(extval, "t") == 0) - appendStringInfoString(buf, "true"); - else - appendStringInfoString(buf, "false"); - break; - - default: - simple_quote_literal(buf, extval); - break; - } - - pfree(extval); - - if (showtype < 0) - return; - - /* - * For showtype == 0, append ::typename unless the constant will be - * implicitly typed as the right type when it is read in. - * - * XXX this code has to be kept in sync with the behavior of the parser, - * especially make_const. - */ - switch (constval->consttype) - { - case BOOLOID: - case UNKNOWNOID: - /* These types can be left unlabeled */ - needlabel = false; - break; - case INT4OID: - /* We determined above whether a label is needed */ - break; - case NUMERICOID: - - /* - * Float-looking constants will be typed as numeric, which we - * checked above; but if there's a nondefault typmod we need to - * show it. 
- */ - needlabel |= (constval->consttypmod >= 0); - break; - default: - needlabel = true; - break; - } - if (needlabel || showtype > 0) - appendStringInfo(buf, "::%s", - format_type_with_typemod(constval->consttype, - constval->consttypmod)); - - get_const_collation(constval, context); -} - -/* - * helper for get_const_expr: append COLLATE if needed - */ -static void -get_const_collation(Const *constval, deparse_context *context) -{ - StringInfo buf = context->buf; - - if (OidIsValid(constval->constcollid)) - { - Oid typcollation = get_typcollation(constval->consttype); - - if (constval->constcollid != typcollation) - { - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(constval->constcollid)); - } - } -} - -/* - * simple_quote_literal - Format a string as a SQL literal, append to buf - */ -static void -simple_quote_literal(StringInfo buf, const char *val) -{ - const char *valptr; - - /* - * We form the string literal according to the prevailing setting of - * standard_conforming_strings; we never use E''. User is responsible for - * making sure result is used correctly. - */ - appendStringInfoChar(buf, '\''); - for (valptr = val; *valptr; valptr++) - { - char ch = *valptr; - - if (SQL_STR_DOUBLE(ch, !standard_conforming_strings)) - appendStringInfoChar(buf, ch); - appendStringInfoChar(buf, ch); - } - appendStringInfoChar(buf, '\''); -} - - -/* ---------- - * get_sublink_expr - Parse back a sublink - * ---------- - */ -static void -get_sublink_expr(SubLink *sublink, deparse_context *context) -{ - StringInfo buf = context->buf; - Query *query = (Query *) (sublink->subselect); - char *opname = NULL; - bool need_paren; - - if (sublink->subLinkType == ARRAY_SUBLINK) - appendStringInfoString(buf, "ARRAY("); - else - appendStringInfoChar(buf, '('); - - /* - * Note that we print the name of only the first operator, when there are - * multiple combining operators. 
This is an approximation that could go - * wrong in various scenarios (operators in different schemas, renamed - * operators, etc) but there is not a whole lot we can do about it, since - * the syntax allows only one operator to be shown. - */ - if (sublink->testexpr) - { - if (IsA(sublink->testexpr, OpExpr)) - { - /* single combining operator */ - OpExpr *opexpr = (OpExpr *) sublink->testexpr; - - get_rule_expr(linitial(opexpr->args), context, true); - opname = generate_operator_name(opexpr->opno, - exprType(linitial(opexpr->args)), - exprType(lsecond(opexpr->args))); - } - else if (IsA(sublink->testexpr, BoolExpr)) - { - /* multiple combining operators, = or <> cases */ - char *sep; - ListCell *l; - - appendStringInfoChar(buf, '('); - sep = ""; - foreach(l, ((BoolExpr *) sublink->testexpr)->args) - { - OpExpr *opexpr = lfirst_node(OpExpr, l); - - appendStringInfoString(buf, sep); - get_rule_expr(linitial(opexpr->args), context, true); - if (!opname) - opname = generate_operator_name(opexpr->opno, - exprType(linitial(opexpr->args)), - exprType(lsecond(opexpr->args))); - sep = ", "; - } - appendStringInfoChar(buf, ')'); - } - else if (IsA(sublink->testexpr, RowCompareExpr)) - { - /* multiple combining operators, < <= > >= cases */ - RowCompareExpr *rcexpr = (RowCompareExpr *) sublink->testexpr; - - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) rcexpr->largs, context, true); - opname = generate_operator_name(linitial_oid(rcexpr->opnos), - exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs))); - appendStringInfoChar(buf, ')'); - } - else - elog(ERROR, "unrecognized testexpr type: %d", - (int) nodeTag(sublink->testexpr)); - } - - need_paren = true; - - switch (sublink->subLinkType) - { - case EXISTS_SUBLINK: - appendStringInfoString(buf, "EXISTS "); - break; - - case ANY_SUBLINK: - if (strcmp(opname, "=") == 0) /* Represent = ANY as IN */ - appendStringInfoString(buf, " IN "); - else - appendStringInfo(buf, " %s ANY ", opname); - break; - 
- case ALL_SUBLINK: - appendStringInfo(buf, " %s ALL ", opname); - break; - - case ROWCOMPARE_SUBLINK: - appendStringInfo(buf, " %s ", opname); - break; - - case EXPR_SUBLINK: - case MULTIEXPR_SUBLINK: - case ARRAY_SUBLINK: - need_paren = false; - break; - - case CTE_SUBLINK: /* shouldn't occur in a SubLink */ - default: - elog(ERROR, "unrecognized sublink type: %d", - (int) sublink->subLinkType); - break; - } - - if (need_paren) - appendStringInfoChar(buf, '('); - - get_query_def(query, buf, context->namespaces, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - - if (need_paren) - appendStringInfoString(buf, "))"); - else - appendStringInfoChar(buf, ')'); -} - - -/* ---------- - * get_tablefunc - Parse back a table function - * ---------- - */ -static void -get_tablefunc(TableFunc *tf, deparse_context *context, bool showimplicit) -{ - StringInfo buf = context->buf; - - /* XMLTABLE is the only existing implementation. */ - - appendStringInfoString(buf, "XMLTABLE("); - - if (tf->ns_uris != NIL) - { - ListCell *lc1, - *lc2; - bool first = true; - - appendStringInfoString(buf, "XMLNAMESPACES ("); - forboth(lc1, tf->ns_uris, lc2, tf->ns_names) - { - Node *expr = (Node *) lfirst(lc1); - char *name = strVal(lfirst(lc2)); - - if (!first) - appendStringInfoString(buf, ", "); - else - first = false; - - if (name != NULL) - { - get_rule_expr(expr, context, showimplicit); - appendStringInfo(buf, " AS %s", name); - } - else - { - appendStringInfoString(buf, "DEFAULT "); - get_rule_expr(expr, context, showimplicit); - } - } - appendStringInfoString(buf, "), "); - } - - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) tf->rowexpr, context, showimplicit); - appendStringInfoString(buf, ") PASSING ("); - get_rule_expr((Node *) tf->docexpr, context, showimplicit); - appendStringInfoChar(buf, ')'); - - if (tf->colexprs != NIL) - { - ListCell *l1; - ListCell *l2; - ListCell *l3; - ListCell *l4; - ListCell *l5; - int colnum = 0; - - 
appendStringInfoString(buf, " COLUMNS "); - forfive(l1, tf->colnames, l2, tf->coltypes, l3, tf->coltypmods, - l4, tf->colexprs, l5, tf->coldefexprs) - { - char *colname = strVal(lfirst(l1)); - Oid typid = lfirst_oid(l2); - int32 typmod = lfirst_int(l3); - Node *colexpr = (Node *) lfirst(l4); - Node *coldefexpr = (Node *) lfirst(l5); - bool ordinality = (tf->ordinalitycol == colnum); - bool notnull = bms_is_member(colnum, tf->notnulls); - - if (colnum > 0) - appendStringInfoString(buf, ", "); - colnum++; - - appendStringInfo(buf, "%s %s", quote_identifier(colname), - ordinality ? "FOR ORDINALITY" : - format_type_with_typemod(typid, typmod)); - if (ordinality) - continue; - - if (coldefexpr != NULL) - { - appendStringInfoString(buf, " DEFAULT ("); - get_rule_expr((Node *) coldefexpr, context, showimplicit); - appendStringInfoChar(buf, ')'); - } - if (colexpr != NULL) - { - appendStringInfoString(buf, " PATH ("); - get_rule_expr((Node *) colexpr, context, showimplicit); - appendStringInfoChar(buf, ')'); - } - if (notnull) - appendStringInfoString(buf, " NOT NULL"); - } - } - - appendStringInfoChar(buf, ')'); -} - -/* ---------- - * get_from_clause - Parse back a FROM clause - * - * "prefix" is the keyword that denotes the start of the list of FROM - * elements. It is FROM when used to parse back SELECT and UPDATE, but - * is USING when parsing back DELETE. - * ---------- - */ -static void -get_from_clause(Query *query, const char *prefix, deparse_context *context) -{ - StringInfo buf = context->buf; - bool first = true; - ListCell *l; - - /* - * We use the query's jointree as a guide to what to print. However, we - * must ignore auto-added RTEs that are marked not inFromCl. (These can - * only appear at the top level of the jointree, so it's sufficient to - * check here.) This check also ensures we ignore the rule pseudo-RTEs - * for NEW and OLD. 
- */ - foreach(l, query->jointree->fromlist) - { - Node *jtnode = (Node *) lfirst(l); - - if (IsA(jtnode, RangeTblRef)) - { - int varno = ((RangeTblRef *) jtnode)->rtindex; - RangeTblEntry *rte = rt_fetch(varno, query->rtable); - - if (!rte->inFromCl) - continue; - } - - if (first) - { - appendContextKeyword(context, prefix, - -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); - first = false; - - get_from_clause_item(jtnode, query, context); - } - else - { - StringInfoData itembuf; - - appendStringInfoString(buf, ", "); - - /* - * Put the new FROM item's text into itembuf so we can decide - * after we've got it whether or not it needs to go on a new line. - */ - initStringInfo(&itembuf); - context->buf = &itembuf; - - get_from_clause_item(jtnode, query, context); - - /* Restore context's output buffer */ - context->buf = buf; - - /* Consider line-wrapping if enabled */ - if (PRETTY_INDENT(context) && context->wrapColumn >= 0) - { - /* Does the new item start with a new line? */ - if (itembuf.len > 0 && itembuf.data[0] == '\n') - { - /* If so, we shouldn't add anything */ - /* instead, remove any trailing spaces currently in buf */ - removeStringInfoSpaces(buf); - } - else - { - char *trailing_nl; - - /* Locate the start of the current line in the buffer */ - trailing_nl = strrchr(buf->data, '\n'); - if (trailing_nl == NULL) - trailing_nl = buf->data; - else - trailing_nl++; - - /* - * Add a newline, plus some indentation, if the new item - * would cause an overflow. 
- */ - if (strlen(trailing_nl) + itembuf.len > context->wrapColumn) - appendContextKeyword(context, "", -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_VAR); - } - } - - /* Add the new item */ - appendStringInfoString(buf, itembuf.data); - - /* clean up */ - pfree(itembuf.data); - } - } -} - -static void -get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); - - if (IsA(jtnode, RangeTblRef)) - { - int varno = ((RangeTblRef *) jtnode)->rtindex; - RangeTblEntry *rte = rt_fetch(varno, query->rtable); - char *refname = get_rtable_name(varno, context); - deparse_columns *colinfo = deparse_columns_fetch(varno, dpns); - RangeTblFunction *rtfunc1 = NULL; - bool printalias; - CitusRTEKind rteKind = GetRangeTblKind(rte); - - if (rte->lateral) - appendStringInfoString(buf, "LATERAL "); - - /* Print the FROM item proper */ - switch (rte->rtekind) - { - case RTE_RELATION: - /* Normal relation RTE */ - appendStringInfo(buf, "%s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, - context->namespaces)); - break; - case RTE_SUBQUERY: - /* Subquery RTE */ - appendStringInfoChar(buf, '('); - get_query_def(rte->subquery, buf, context->namespaces, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - appendStringInfoChar(buf, ')'); - break; - case RTE_FUNCTION: - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "%s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, - fragmentTableName)); - break; - } - - /* Function RTE */ - rtfunc1 = (RangeTblFunction *) 
linitial(rte->functions); - - /* - * Omit ROWS FROM() syntax for just one function, unless it - * has both a coldeflist and WITH ORDINALITY. If it has both, - * we must use ROWS FROM() syntax to avoid ambiguity about - * whether the coldeflist includes the ordinality column. - */ - if (list_length(rte->functions) == 1 && - (rtfunc1->funccolnames == NIL || !rte->funcordinality)) - { - get_rule_expr_funccall(rtfunc1->funcexpr, context, true); - /* we'll print the coldeflist below, if it has one */ - } - else - { - bool all_unnest; - ListCell *lc; - - /* - * If all the function calls in the list are to unnest, - * and none need a coldeflist, then collapse the list back - * down to UNNEST(args). (If we had more than one - * built-in unnest function, this would get more - * difficult.) - * - * XXX This is pretty ugly, since it makes not-terribly- - * future-proof assumptions about what the parser would do - * with the output; but the alternative is to emit our - * nonstandard ROWS FROM() notation for what might have - * been a perfectly spec-compliant multi-argument - * UNNEST(). 
- */ - all_unnest = true; - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - - if (!IsA(rtfunc->funcexpr, FuncExpr) || - ((FuncExpr *) rtfunc->funcexpr)->funcid != F_ARRAY_UNNEST || - rtfunc->funccolnames != NIL) - { - all_unnest = false; - break; - } - } - - if (all_unnest) - { - List *allargs = NIL; - - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - List *args = ((FuncExpr *) rtfunc->funcexpr)->args; - - allargs = list_concat(allargs, args); - } - - appendStringInfoString(buf, "UNNEST("); - get_rule_expr((Node *) allargs, context, true); - appendStringInfoChar(buf, ')'); - } - else - { - int funcno = 0; - - appendStringInfoString(buf, "ROWS FROM("); - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - - if (funcno > 0) - appendStringInfoString(buf, ", "); - get_rule_expr_funccall(rtfunc->funcexpr, context, true); - if (rtfunc->funccolnames != NIL) - { - /* Reconstruct the column definition list */ - appendStringInfoString(buf, " AS "); - get_from_clause_coldeflist(rtfunc, - NULL, - context); - } - funcno++; - } - appendStringInfoChar(buf, ')'); - } - /* prevent printing duplicate coldeflist below */ - rtfunc1 = NULL; - } - if (rte->funcordinality) - appendStringInfoString(buf, " WITH ORDINALITY"); - break; - case RTE_TABLEFUNC: - get_tablefunc(rte->tablefunc, context, true); - break; - case RTE_VALUES: - /* Values list RTE */ - appendStringInfoChar(buf, '('); - get_values_def(rte->values_lists, context); - appendStringInfoChar(buf, ')'); - break; - case RTE_CTE: - appendStringInfoString(buf, quote_identifier(rte->ctename)); - break; - default: - elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind); - break; - } - - /* Print the relation alias, if needed */ - printalias = false; - if (rte->alias != NULL) - { - /* Always print alias if user provided one */ - printalias = true; - } - else if (colinfo->printaliases) - { 
- /* Always print alias if we need to print column aliases */ - printalias = true; - } - else if (rte->rtekind == RTE_RELATION) - { - /* - * No need to print alias if it's same as relation name (this - * would normally be the case, but not if set_rtable_names had to - * resolve a conflict). - */ - if (strcmp(refname, get_relation_name(rte->relid)) != 0) - printalias = true; - } - else if (rte->rtekind == RTE_FUNCTION) - { - /* - * For a function RTE, always print alias. This covers possible - * renaming of the function and/or instability of the - * FigureColname rules for things that aren't simple functions. - * Note we'd need to force it anyway for the columndef list case. - */ - printalias = true; - } - else if (rte->rtekind == RTE_VALUES) - { - /* Alias is syntactically required for VALUES */ - printalias = true; - } - else if (rte->rtekind == RTE_CTE) - { - /* - * No need to print alias if it's same as CTE name (this would - * normally be the case, but not if set_rtable_names had to - * resolve a conflict). 
- */ - if (strcmp(refname, rte->ctename) != 0) - printalias = true; - } - else if (rte->rtekind == RTE_SUBQUERY) - { - /* subquery requires alias too */ - printalias = true; - } - if (printalias) - appendStringInfo(buf, " %s", quote_identifier(refname)); - - /* Print the column definitions or aliases, if needed */ - if (rtfunc1 && rtfunc1->funccolnames != NIL) - { - /* Reconstruct the columndef list, which is also the aliases */ - get_from_clause_coldeflist(rtfunc1, colinfo, context); - } - else if (GetRangeTblKind(rte) != CITUS_RTE_SHARD || - (rte->alias != NULL && rte->alias->colnames != NIL)) - { - /* Else print column aliases as needed */ - get_column_alias_list(colinfo, context); - } - /* check if column's are given aliases in distributed tables */ - else if (colinfo->parentUsing != NIL) - { - Assert(colinfo->printaliases); - get_column_alias_list(colinfo, context); - } - - /* Tablesample clause must go after any alias */ - if ((rteKind == CITUS_RTE_RELATION || rteKind == CITUS_RTE_SHARD) && - rte->tablesample) - { - get_tablesample_def(rte->tablesample, context); - } - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); - bool need_paren_on_right; - - need_paren_on_right = PRETTY_PAREN(context) && - !IsA(j->rarg, RangeTblRef) && - !(IsA(j->rarg, JoinExpr) &&((JoinExpr *) j->rarg)->alias != NULL); - - if (!PRETTY_PAREN(context) || j->alias != NULL) - appendStringInfoChar(buf, '('); - - get_from_clause_item(j->larg, query, context); - - switch (j->jointype) - { - case JOIN_INNER: - if (j->quals) - appendContextKeyword(context, " JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - else - appendContextKeyword(context, " CROSS JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - case JOIN_LEFT: - appendContextKeyword(context, " LEFT JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - case 
JOIN_FULL: - appendContextKeyword(context, " FULL JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - case JOIN_RIGHT: - appendContextKeyword(context, " RIGHT JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - default: - elog(ERROR, "unrecognized join type: %d", - (int) j->jointype); - } - - if (need_paren_on_right) - appendStringInfoChar(buf, '('); - get_from_clause_item(j->rarg, query, context); - if (need_paren_on_right) - appendStringInfoChar(buf, ')'); - - if (j->usingClause) - { - ListCell *lc; - bool first = true; - - appendStringInfoString(buf, " USING ("); - /* Use the assigned names, not what's in usingClause */ - foreach(lc, colinfo->usingNames) - { - char *colname = (char *) lfirst(lc); - - if (first) - first = false; - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, quote_identifier(colname)); - } - appendStringInfoChar(buf, ')'); - } - else if (j->quals) - { - appendStringInfoString(buf, " ON "); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr(j->quals, context, false); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - else if (j->jointype != JOIN_INNER) - { - /* If we didn't say CROSS JOIN above, we must provide an ON */ - appendStringInfoString(buf, " ON TRUE"); - } - - if (!PRETTY_PAREN(context) || j->alias != NULL) - appendStringInfoChar(buf, ')'); - - /* Yes, it's correct to put alias after the right paren ... */ - if (j->alias != NULL) - { - /* - * Note that it's correct to emit an alias clause if and only if - * there was one originally. Otherwise we'd be converting a named - * join to unnamed or vice versa, which creates semantic - * subtleties we don't want. However, we might print a different - * alias name than was there originally. 
- */ - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(j->rtindex, - context))); - get_column_alias_list(colinfo, context); - } - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); -} - -/* - * get_column_alias_list - print column alias list for an RTE - * - * Caller must already have printed the relation's alias name. - */ -static void -get_column_alias_list(deparse_columns *colinfo, deparse_context *context) -{ - StringInfo buf = context->buf; - int i; - bool first = true; - - /* Don't print aliases if not needed */ - if (!colinfo->printaliases) - return; - - for (i = 0; i < colinfo->num_new_cols; i++) - { - char *colname = colinfo->new_colnames[i]; - - if (first) - { - appendStringInfoChar(buf, '('); - first = false; - } - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, quote_identifier(colname)); - } - if (!first) - appendStringInfoChar(buf, ')'); -} - -/* - * get_from_clause_coldeflist - reproduce FROM clause coldeflist - * - * When printing a top-level coldeflist (which is syntactically also the - * relation's column alias list), use column names from colinfo. But when - * printing a coldeflist embedded inside ROWS FROM(), we prefer to use the - * original coldeflist's names, which are available in rtfunc->funccolnames. - * Pass NULL for colinfo to select the latter behavior. - * - * The coldeflist is appended immediately (no space) to buf. Caller is - * responsible for ensuring that an alias or AS is present before it. 
- */ -static void -get_from_clause_coldeflist(RangeTblFunction *rtfunc, - deparse_columns *colinfo, - deparse_context *context) -{ - StringInfo buf = context->buf; - ListCell *l1; - ListCell *l2; - ListCell *l3; - ListCell *l4; - int i; - - appendStringInfoChar(buf, '('); - - i = 0; - forfour(l1, rtfunc->funccoltypes, - l2, rtfunc->funccoltypmods, - l3, rtfunc->funccolcollations, - l4, rtfunc->funccolnames) - { - Oid atttypid = lfirst_oid(l1); - int32 atttypmod = lfirst_int(l2); - Oid attcollation = lfirst_oid(l3); - char *attname; - - if (colinfo) - attname = colinfo->colnames[i]; - else - attname = strVal(lfirst(l4)); - - Assert(attname); /* shouldn't be any dropped columns here */ - - if (i > 0) - appendStringInfoString(buf, ", "); - appendStringInfo(buf, "%s %s", - quote_identifier(attname), - format_type_with_typemod(atttypid, atttypmod)); - if (OidIsValid(attcollation) && - attcollation != get_typcollation(atttypid)) - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(attcollation)); - - i++; - } - - appendStringInfoChar(buf, ')'); -} - -/* - * get_tablesample_def - print a TableSampleClause - */ -static void -get_tablesample_def(TableSampleClause *tablesample, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid argtypes[1]; - int nargs; - ListCell *l; - - /* - * We should qualify the handler's function name if it wouldn't be - * resolved by lookup in the current search path. 
- */ - argtypes[0] = INTERNALOID; - appendStringInfo(buf, " TABLESAMPLE %s (", - generate_function_name(tablesample->tsmhandler, 1, - NIL, argtypes, - false, NULL, EXPR_KIND_NONE)); - - nargs = 0; - foreach(l, tablesample->args) - { - if (nargs++ > 0) - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) lfirst(l), context, false); - } - appendStringInfoChar(buf, ')'); - - if (tablesample->repeatable != NULL) - { - appendStringInfoString(buf, " REPEATABLE ("); - get_rule_expr((Node *) tablesample->repeatable, context, false); - appendStringInfoChar(buf, ')'); - } -} - - -/* - * get_opclass_name - fetch name of an index operator class - * - * The opclass name is appended (after a space) to buf. - * - * Output is suppressed if the opclass is the default for the given - * actual_datatype. (If you don't want this behavior, just pass - * InvalidOid for actual_datatype.) - */ -static void -get_opclass_name(Oid opclass, Oid actual_datatype, - StringInfo buf) -{ - HeapTuple ht_opc; - Form_pg_opclass opcrec; - char *opcname; - char *nspname; - - ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); - if (!HeapTupleIsValid(ht_opc)) - elog(ERROR, "cache lookup failed for opclass %u", opclass); - opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc); - - if (!OidIsValid(actual_datatype) || - GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) - { - /* Okay, we need the opclass name. Do we need to qualify it? 
*/ - opcname = NameStr(opcrec->opcname); - if (OpclassIsVisible(opclass)) - appendStringInfo(buf, " %s", quote_identifier(opcname)); - else - { - nspname = get_namespace_name(opcrec->opcnamespace); - appendStringInfo(buf, " %s.%s", - quote_identifier(nspname), - quote_identifier(opcname)); - } - } - ReleaseSysCache(ht_opc); -} - -/* - * processIndirection - take care of array and subfield assignment - * - * We strip any top-level FieldStore or assignment SubscriptingRef nodes that - * appear in the input, printing them as decoration for the base column - * name (which we assume the caller just printed). We might also need to - * strip CoerceToDomain nodes, but only ones that appear above assignment - * nodes. - * - * Returns the subexpression that's to be assigned. - */ -static Node * -processIndirection(Node *node, deparse_context *context) -{ - StringInfo buf = context->buf; - CoerceToDomain *cdomain = NULL; - - for (;;) - { - if (node == NULL) - break; - if (IsA(node, FieldStore)) - { - FieldStore *fstore = (FieldStore *) node; - Oid typrelid; - char *fieldname; - - /* lookup tuple type */ - typrelid = get_typ_typrelid(fstore->resulttype); - if (!OidIsValid(typrelid)) - elog(ERROR, "argument type %s of FieldStore is not a tuple type", - format_type_be(fstore->resulttype)); - - /* - * Print the field name. There should only be one target field in - * stored rules. There could be more than that in executable - * target lists, but this function cannot be used for that case. - */ - Assert(list_length(fstore->fieldnums) == 1); - fieldname = get_attname(typrelid, - linitial_int(fstore->fieldnums), false); - appendStringInfo(buf, ".%s", quote_identifier(fieldname)); - - /* - * We ignore arg since it should be an uninteresting reference to - * the target column or subcolumn. 
- */ - node = (Node *) linitial(fstore->newvals); - } - else if (IsA(node, SubscriptingRef)) - { - SubscriptingRef *sbsref = (SubscriptingRef *) node; - - if (sbsref->refassgnexpr == NULL) - break; - printSubscripts(sbsref, context); - - /* - * We ignore refexpr since it should be an uninteresting reference - * to the target column or subcolumn. - */ - node = (Node *) sbsref->refassgnexpr; - } - else if (IsA(node, CoerceToDomain)) - { - cdomain = (CoerceToDomain *) node; - /* If it's an explicit domain coercion, we're done */ - if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) - break; - /* Tentatively descend past the CoerceToDomain */ - node = (Node *) cdomain->arg; - } - else - break; - } - - /* - * If we descended past a CoerceToDomain whose argument turned out not to - * be a FieldStore or array assignment, back up to the CoerceToDomain. - * (This is not enough to be fully correct if there are nested implicit - * CoerceToDomains, but such cases shouldn't ever occur.) - */ - if (cdomain && node == (Node *) cdomain->arg) - node = (Node *) cdomain; - - return node; -} - -static void -printSubscripts(SubscriptingRef *sbsref, deparse_context *context) -{ - StringInfo buf = context->buf; - ListCell *lowlist_item; - ListCell *uplist_item; - - lowlist_item = list_head(sbsref->reflowerindexpr); /* could be NULL */ - foreach(uplist_item, sbsref->refupperindexpr) - { - appendStringInfoChar(buf, '['); - if (lowlist_item) - { - /* If subexpression is NULL, get_rule_expr prints nothing */ - get_rule_expr((Node *) lfirst(lowlist_item), context, false); - appendStringInfoChar(buf, ':'); - lowlist_item = lnext(sbsref->reflowerindexpr, lowlist_item); - } - /* If subexpression is NULL, get_rule_expr prints nothing */ - get_rule_expr((Node *) lfirst(uplist_item), context, false); - appendStringInfoChar(buf, ']'); - } -} - -/* - * get_relation_name - * Get the unqualified name of a relation specified by OID - * - * This differs from the underlying get_rel_name() function in 
that it will - * throw error instead of silently returning NULL if the OID is bad. - */ -static char * -get_relation_name(Oid relid) -{ - char *relname = get_rel_name(relid); - - if (!relname) - elog(ERROR, "cache lookup failed for relation %u", relid); - return relname; -} - -/* - * generate_relation_or_shard_name - * Compute the name to display for a relation or shard - * - * If the provided relid is equal to the provided distrelid, this function - * returns a shard-extended relation name; otherwise, it falls through to a - * simple generate_relation_name call. - */ -static char * -generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid, - List *namespaces) -{ - char *relname = NULL; - - if (relid == distrelid) - { - relname = get_relation_name(relid); - - if (shardid > 0) - { - Oid schemaOid = get_rel_namespace(relid); - char *schemaName = get_namespace_name(schemaOid); - - AppendShardIdToName(&relname, shardid); - - relname = quote_qualified_identifier(schemaName, relname); - } - } - else - { - relname = generate_relation_name(relid, namespaces); - } - - return relname; -} - -/* - * generate_relation_name - * Compute the name to display for a relation specified by OID - * - * The result includes all necessary quoting and schema-prefixing. - * - * If namespaces isn't NIL, it must be a list of deparse_namespace nodes. - * We will forcibly qualify the relation name if it equals any CTE name - * visible in the namespace list. 
- */ -char * -generate_relation_name(Oid relid, List *namespaces) -{ - HeapTuple tp; - Form_pg_class reltup; - bool need_qual; - ListCell *nslist; - char *relname; - char *nspname; - char *result; - - tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for relation %u", relid); - reltup = (Form_pg_class) GETSTRUCT(tp); - relname = NameStr(reltup->relname); - - /* Check for conflicting CTE name */ - need_qual = false; - foreach(nslist, namespaces) - { - deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist); - ListCell *ctlist; - - foreach(ctlist, dpns->ctes) - { - CommonTableExpr *cte = (CommonTableExpr *) lfirst(ctlist); - - if (strcmp(cte->ctename, relname) == 0) - { - need_qual = true; - break; - } - } - if (need_qual) - break; - } - - /* Otherwise, qualify the name if not visible in search path */ - if (!need_qual) - need_qual = !RelationIsVisible(relid); - - if (need_qual) - nspname = get_namespace_name(reltup->relnamespace); - else - nspname = NULL; - - result = quote_qualified_identifier(nspname, relname); - - ReleaseSysCache(tp); - - return result; -} - - -/* - * generate_rte_shard_name returns the qualified name of the shard given a - * CITUS_RTE_SHARD range table entry. - */ -static char * -generate_rte_shard_name(RangeTblEntry *rangeTableEntry) -{ - char *shardSchemaName = NULL; - char *shardTableName = NULL; - - Assert(GetRangeTblKind(rangeTableEntry) == CITUS_RTE_SHARD); - - ExtractRangeTblExtraData(rangeTableEntry, NULL, &shardSchemaName, &shardTableName, - NULL); - - return generate_fragment_name(shardSchemaName, shardTableName); -} - - -/* - * generate_fragment_name - * Compute the name to display for a shard or merged table - * - * The result includes all necessary quoting and schema-prefixing. The schema - * name can be NULL for regular shards. For merged tables, they are always - * declared within a job-specific schema, and therefore can't have null schema - * names. 
- */ -static char * -generate_fragment_name(char *schemaName, char *tableName) -{ - StringInfo fragmentNameString = makeStringInfo(); - - if (schemaName != NULL) - { - appendStringInfo(fragmentNameString, "%s.%s", quote_identifier(schemaName), - quote_identifier(tableName)); - } - else - { - appendStringInfoString(fragmentNameString, quote_identifier(tableName)); - } - - return fragmentNameString->data; -} - -/* - * generate_function_name - * Compute the name to display for a function specified by OID, - * given that it is being called with the specified actual arg names and - * types. (Those matter because of ambiguous-function resolution rules.) - * - * If we're dealing with a potentially variadic function (in practice, this - * means a FuncExpr or Aggref, not some other way of calling a function), then - * has_variadic must specify whether variadic arguments have been merged, - * and *use_variadic_p will be set to indicate whether to print VARIADIC in - * the output. For non-FuncExpr cases, has_variadic should be false and - * use_variadic_p can be NULL. - * - * The result includes all necessary quoting and schema-prefixing. - */ -static char * -generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, - bool has_variadic, bool *use_variadic_p, - ParseExprKind special_exprkind) -{ - char *result; - HeapTuple proctup; - Form_pg_proc procform; - char *proname; - bool use_variadic; - char *nspname; - FuncDetailCode p_result; - Oid p_funcid; - Oid p_rettype; - bool p_retset; - int p_nvargs; - Oid p_vatype; - Oid *p_true_typeids; - bool force_qualify = false; - - proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); - if (!HeapTupleIsValid(proctup)) - elog(ERROR, "cache lookup failed for function %u", funcid); - procform = (Form_pg_proc) GETSTRUCT(proctup); - proname = NameStr(procform->proname); - - /* - * Due to parser hacks to avoid needing to reserve CUBE, we need to force - * qualification in some special cases. 
- */ - if (special_exprkind == EXPR_KIND_GROUP_BY) - { - if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0) - force_qualify = true; - } - - /* - * Determine whether VARIADIC should be printed. We must do this first - * since it affects the lookup rules in func_get_detail(). - * - * Currently, we always print VARIADIC if the function has a merged - * variadic-array argument. Note that this is always the case for - * functions taking a VARIADIC argument type other than VARIADIC ANY. - * - * In principle, if VARIADIC wasn't originally specified and the array - * actual argument is deconstructable, we could print the array elements - * separately and not print VARIADIC, thus more nearly reproducing the - * original input. For the moment that seems like too much complication - * for the benefit, and anyway we do not know whether VARIADIC was - * originally specified if it's a non-ANY type. - */ - if (use_variadic_p) - { - /* Parser should not have set funcvariadic unless fn is variadic */ - Assert(!has_variadic || OidIsValid(procform->provariadic)); - use_variadic = has_variadic; - *use_variadic_p = use_variadic; - } - else - { - Assert(!has_variadic); - use_variadic = false; - } - - /* - * The idea here is to schema-qualify only if the parser would fail to - * resolve the correct function given the unqualified func name with the - * specified argtypes and VARIADIC flag. But if we already decided to - * force qualification, then we can skip the lookup and pretend we didn't - * find it. 
- */ - if (!force_qualify) - p_result = func_get_detail(list_make1(makeString(proname)), - NIL, argnames, nargs, argtypes, - !use_variadic, true, - &p_funcid, &p_rettype, - &p_retset, &p_nvargs, &p_vatype, - &p_true_typeids, NULL); - else - { - p_result = FUNCDETAIL_NOTFOUND; - p_funcid = InvalidOid; - } - - if ((p_result == FUNCDETAIL_NORMAL || - p_result == FUNCDETAIL_AGGREGATE || - p_result == FUNCDETAIL_WINDOWFUNC) && - p_funcid == funcid) - nspname = NULL; - else - nspname = get_namespace_name(procform->pronamespace); - - result = quote_qualified_identifier(nspname, proname); - - ReleaseSysCache(proctup); - - return result; -} - -/* - * generate_operator_name - * Compute the name to display for an operator specified by OID, - * given that it is being called with the specified actual arg types. - * (Arg types matter because of ambiguous-operator resolution rules. - * Pass InvalidOid for unused arg of a unary operator.) - * - * The result includes all necessary quoting and schema-prefixing, - * plus the OPERATOR() decoration needed to use a qualified operator name - * in an expression. - */ -char * -generate_operator_name(Oid operid, Oid arg1, Oid arg2) -{ - StringInfoData buf; - HeapTuple opertup; - Form_pg_operator operform; - char *oprname; - char *nspname; - - initStringInfo(&buf); - - opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operid)); - if (!HeapTupleIsValid(opertup)) - elog(ERROR, "cache lookup failed for operator %u", operid); - operform = (Form_pg_operator) GETSTRUCT(opertup); - oprname = NameStr(operform->oprname); - - /* - * Unlike generate_operator_name() in postgres/src/backend/utils/adt/ruleutils.c, - * we don't check if the operator is in current namespace or not. This is - * because this check is costly when the operator is not in current namespace. 
- */ - nspname = get_namespace_name(operform->oprnamespace); - Assert(nspname != NULL); - appendStringInfo(&buf, "OPERATOR(%s.", quote_identifier(nspname)); - appendStringInfoString(&buf, oprname); - appendStringInfoChar(&buf, ')'); - - ReleaseSysCache(opertup); - - return buf.data; -} - -/* - * get_one_range_partition_bound_string - * A C string representation of one range partition bound - */ -char * -get_range_partbound_string(List *bound_datums) -{ - deparse_context context; - StringInfo buf = makeStringInfo(); - ListCell *cell; - char *sep; - - memset(&context, 0, sizeof(deparse_context)); - context.buf = buf; - - appendStringInfoString(buf, "("); - sep = ""; - foreach(cell, bound_datums) - { - PartitionRangeDatum *datum = - castNode(PartitionRangeDatum, lfirst(cell)); - - appendStringInfoString(buf, sep); - if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE) - appendStringInfoString(buf, "MINVALUE"); - else if (datum->kind == PARTITION_RANGE_DATUM_MAXVALUE) - appendStringInfoString(buf, "MAXVALUE"); - else - { - Const *val = castNode(Const, datum->value); - - get_const_expr(val, &context, -1); - } - sep = ", "; - } - appendStringInfoChar(buf, ')'); - - return buf->data; -} - -/* - * Collect a list of OIDs of all sequences owned by the specified relation, - * and column if specified. If deptype is not zero, then only find sequences - * with the specified dependency type. 
- */ -List * -getOwnedSequences_internal(Oid relid, AttrNumber attnum, char deptype) -{ - List *result = NIL; - Relation depRel; - ScanKeyData key[3]; - SysScanDesc scan; - HeapTuple tup; - - depRel = table_open(DependRelationId, AccessShareLock); - - ScanKeyInit(&key[0], - Anum_pg_depend_refclassid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationRelationId)); - ScanKeyInit(&key[1], - Anum_pg_depend_refobjid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(relid)); - if (attnum) - ScanKeyInit(&key[2], - Anum_pg_depend_refobjsubid, - BTEqualStrategyNumber, F_INT4EQ, - Int32GetDatum(attnum)); - - scan = systable_beginscan(depRel, DependReferenceIndexId, true, - NULL, attnum ? 3 : 2, key); - - while (HeapTupleIsValid(tup = systable_getnext(scan))) - { - Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup); - - /* - * We assume any auto or internal dependency of a sequence on a column - * must be what we are looking for. (We need the relkind test because - * indexes can also have auto dependencies on columns.) 
- */ - if (deprec->classid == RelationRelationId && - deprec->objsubid == 0 && - deprec->refobjsubid != 0 && - (deprec->deptype == DEPENDENCY_AUTO || deprec->deptype == DEPENDENCY_INTERNAL) && - get_rel_relkind(deprec->objid) == RELKIND_SEQUENCE) - { - if (!deptype || deprec->deptype == deptype) - result = lappend_oid(result, deprec->objid); - } - } - - systable_endscan(scan); - - table_close(depRel, AccessShareLock); - - return result; -} - -#endif /* (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) */ diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c index 0c710909b..039475735 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -496,11 +496,7 @@ struct TaskPlacementExecution; /* GUC, determining whether Citus opens 1 connection per task */ bool ForceMaxQueryParallelization = false; int MaxAdaptiveExecutorPoolSize = 16; -#if PG_VERSION_NUM >= PG_VERSION_14 bool EnableBinaryProtocol = true; -#else -bool EnableBinaryProtocol = false; -#endif /* GUC, number of ms to wait between opening connections to the same worker */ int ExecutorSlowStartInterval = 10; diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index 04cb39a58..662eaaf97 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -455,9 +455,9 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript location); copyOptions = lappend(copyOptions, copyOption); - CopyFromState copyState = BeginCopyFrom_compat(NULL, stubRelation, NULL, - fileName, false, NULL, - NULL, copyOptions); + CopyFromState copyState = BeginCopyFrom(NULL, stubRelation, NULL, + fileName, false, NULL, + NULL, copyOptions); while (true) { diff --git a/src/backend/distributed/executor/query_stats.c 
b/src/backend/distributed/executor/query_stats.c index 6dd5196f2..1ac70489c 100644 --- a/src/backend/distributed/executor/query_stats.c +++ b/src/backend/distributed/executor/query_stats.c @@ -797,11 +797,7 @@ BuildExistingQueryIdHash(void) { const int userIdAttributeNumber = 1; const int dbIdAttributeNumber = 2; -#if PG_VERSION_NUM >= PG_VERSION_14 const int queryIdAttributeNumber = 4; -#else - const int queryIdAttributeNumber = 3; -#endif Datum commandTypeDatum = (Datum) 0; bool missingOK = true; diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index adc3fc1ab..c307dc737 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -896,18 +896,11 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) return NULL; } - char *objectDescription = NULL; - char *dependencyDescription = NULL; StringInfo errorInfo = makeStringInfo(); StringInfo detailInfo = makeStringInfo(); - #if PG_VERSION_NUM >= PG_VERSION_14 - objectDescription = getObjectDescription(objectAddress, false); - dependencyDescription = getObjectDescription(undistributableDependency, false); - #else - objectDescription = getObjectDescription(objectAddress); - dependencyDescription = getObjectDescription(undistributableDependency); - #endif + char *objectDescription = getObjectDescription(objectAddress, false); + char *dependencyDescription = getObjectDescription(undistributableDependency, false); /* * We expect callers to interpret the error returned from this function diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index 55d7c9f33..5997480a0 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -85,12 +85,12 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS) { ereport(ERROR, (errmsg("object still exists"), errdetail("the %s \"%s\" still exists", - 
getObjectTypeDescription_compat(&address, + getObjectTypeDescription(&address, - /* missingOk: */ false), - getObjectIdentity_compat(&address, + /* missingOk: */ false), + getObjectIdentity(&address, - /* missingOk: */ false)), + /* missingOk: */ false)), errhint("drop the object via a DROP command"))); } diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 5f8f76bd6..7dfc30f73 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -916,15 +916,9 @@ MarkObjectsDistributedCreateCommand(List *addresses, int forceDelegation = list_nth_int(forceDelegations, currentObjectCounter); List *names = NIL; List *args = NIL; - char *objectType = NULL; - #if PG_VERSION_NUM >= PG_VERSION_14 - objectType = getObjectTypeDescription(address, false); + char *objectType = getObjectTypeDescription(address, false); getObjectIdentityParts(address, &names, &args, false); - #else - objectType = getObjectTypeDescription(address); - getObjectIdentityParts(address, &names, &args); - #endif if (!isFirstObject) { diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index a6ff93220..53a963029 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -4031,11 +4031,7 @@ CancelTasksForJob(int64 jobid) errmsg("must be a superuser to cancel superuser tasks"))); } else if (!has_privs_of_role(GetUserId(), taskOwner) && -#if PG_VERSION_NUM >= 140000 !has_privs_of_role(GetUserId(), ROLE_PG_SIGNAL_BACKEND)) -#else - !has_privs_of_role(GetUserId(), DEFAULT_ROLE_SIGNAL_BACKENDID)) -#endif { /* user doesn't have the permissions to cancel this job */ ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), diff --git a/src/backend/distributed/operations/worker_node_manager.c b/src/backend/distributed/operations/worker_node_manager.c index 
658c83269..76f2732ba 100644 --- a/src/backend/distributed/operations/worker_node_manager.c +++ b/src/backend/distributed/operations/worker_node_manager.c @@ -31,11 +31,7 @@ #include "utils/guc.h" #include "utils/hsearch.h" #include "utils/memutils.h" -#if PG_VERSION_NUM < PG_VERSION_13 -#include "utils/hashutils.h" -#else #include "common/hashfn.h" -#endif /* Config variables managed via guc.c */ diff --git a/src/backend/distributed/operations/worker_shard_copy.c b/src/backend/distributed/operations/worker_shard_copy.c index 00a5413c9..ba65635a7 100644 --- a/src/backend/distributed/operations/worker_shard_copy.c +++ b/src/backend/distributed/operations/worker_shard_copy.c @@ -527,13 +527,13 @@ LocalCopyToShard(ShardCopyDestReceiver *copyDest, CopyOutState localCopyOutState false /* inFromCl */); List *options = (isBinaryCopy) ? list_make1(binaryFormatOption) : NULL; - CopyFromState cstate = BeginCopyFrom_compat(pState, shard, - NULL /* whereClause */, - NULL /* fileName */, - false /* is_program */, - ReadFromLocalBufferCallback, - NULL /* attlist (NULL is all columns) */, - options); + CopyFromState cstate = BeginCopyFrom(pState, shard, + NULL /* whereClause */, + NULL /* fileName */, + false /* is_program */, + ReadFromLocalBufferCallback, + NULL /* attlist (NULL is all columns) */, + options); CopyFrom(cstate); EndCopyFrom(cstate); resetStringInfo(localCopyOutState->fe_msgbuf); diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 84e76c6d4..c59e920b5 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -861,8 +861,8 @@ RouterModifyTaskForShardInterval(Query *originalQuery, * Note that this is only the case with PG14 as the parameter doesn't exist * prior to that. 
*/ - shardRestrictionList = make_simple_restrictinfo_compat(NULL, - (Expr *) shardOpExpressions); + shardRestrictionList = make_simple_restrictinfo(NULL, + (Expr *) shardOpExpressions); extendedBaseRestrictInfo = lappend(extendedBaseRestrictInfo, shardRestrictionList); diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index 1cc3d4102..674077b46 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -1101,8 +1101,8 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS) TupleDesc tupleDescriptor = NULL; Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); DestReceiver *tupleStoreDest = CreateTuplestoreDestReceiver(); - SetTuplestoreDestReceiverParams_compat(tupleStoreDest, tupleStore, - CurrentMemoryContext, false, NULL, NULL); + SetTuplestoreDestReceiverParams(tupleStoreDest, tupleStore, + CurrentMemoryContext, false, NULL, NULL); List *parseTreeList = pg_parse_query(queryString); if (list_length(parseTreeList) != 1) @@ -1126,15 +1126,9 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS) Query *analyzedQuery = parse_analyze_varparams_compat(parseTree, queryString, ¶mTypes, &numParams, NULL); -#if PG_VERSION_NUM >= PG_VERSION_14 - /* pg_rewrite_query is a wrapper around QueryRewrite with some debugging logic */ List *queryList = pg_rewrite_query(analyzedQuery); -#else - /* pg_rewrite_query is not yet public in PostgreSQL 13 */ - List *queryList = QueryRewrite(analyzedQuery); -#endif if (list_length(queryList) != 1) { ereport(ERROR, (errmsg("cannot EXPLAIN ANALYZE a query rewritten " diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index e0548049f..455f050a0 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -1855,11 +1855,7 @@ 
MasterAggregateExpression(Aggref *originalAggregate, { /* array_cat_agg() takes anyarray as input */ catAggregateName = ARRAY_CAT_AGGREGATE_NAME; -#if PG_VERSION_NUM >= PG_VERSION_14 catInputType = ANYCOMPATIBLEARRAYOID; -#else - catInputType = ANYARRAYOID; -#endif } else if (aggregateType == AGGREGATE_JSONB_AGG || aggregateType == AGGREGATE_JSONB_OBJECT_AGG) @@ -1897,8 +1893,6 @@ MasterAggregateExpression(Aggref *originalAggregate, if (aggregateType == AGGREGATE_ARRAY_AGG) { -#if PG_VERSION_NUM >= PG_VERSION_14 - /* * Postgres expects the type of the array here such as INT4ARRAYOID. * Hence we set it to workerReturnType. If we set this to @@ -1906,9 +1900,6 @@ MasterAggregateExpression(Aggref *originalAggregate, * "argument declared anycompatiblearray is not an array but type anycompatiblearray" */ newMasterAggregate->aggargtypes = list_make1_oid(workerReturnType); -#else - newMasterAggregate->aggargtypes = list_make1_oid(ANYARRAYOID); -#endif } else { @@ -3625,8 +3616,8 @@ static Oid CitusFunctionOidWithSignature(char *functionName, int numargs, Oid *argtypes) { List *aggregateName = list_make2(makeString("pg_catalog"), makeString(functionName)); - FuncCandidateList clist = FuncnameGetCandidates_compat(aggregateName, numargs, NIL, - false, false, false, true); + FuncCandidateList clist = FuncnameGetCandidates(aggregateName, numargs, NIL, + false, false, false, true); for (; clist; clist = clist->next) { diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index 41ae916ad..6ad51e0ae 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -152,10 +152,8 @@ static List * ExtractInsertValuesList(Query *query, Var *partitionColumn); static DeferredErrorMessage * DeferErrorIfUnsupportedRouterPlannableSelectQuery( Query *query); static DeferredErrorMessage * ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree); -#if 
PG_VERSION_NUM >= PG_VERSION_14 static DeferredErrorMessage * ErrorIfQueryHasCTEWithSearchClause(Query *queryTree); static bool ContainsSearchClauseWalker(Node *node, void *context); -#endif static bool SelectsFromDistributedTable(List *rangeTableList, Query *query); static ShardPlacement * CreateDummyPlacement(bool hasLocalRelation); static ShardPlacement * CreateLocalDummyPlacement(); @@ -1118,14 +1116,12 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer } } -#if PG_VERSION_NUM >= PG_VERSION_14 DeferredErrorMessage *CTEWithSearchClauseError = ErrorIfQueryHasCTEWithSearchClause(originalQuery); if (CTEWithSearchClauseError != NULL) { return CTEWithSearchClauseError; } -#endif return NULL; } @@ -3758,14 +3754,12 @@ DeferErrorIfUnsupportedRouterPlannableSelectQuery(Query *query) NULL, NULL); } -#if PG_VERSION_NUM >= PG_VERSION_14 DeferredErrorMessage *CTEWithSearchClauseError = ErrorIfQueryHasCTEWithSearchClause(query); if (CTEWithSearchClauseError != NULL) { return CTEWithSearchClauseError; } -#endif return ErrorIfQueryHasUnroutableModifyingCTE(query); } @@ -3900,8 +3894,6 @@ ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree) } -#if PG_VERSION_NUM >= PG_VERSION_14 - /* * ErrorIfQueryHasCTEWithSearchClause checks if the query contains any common table * expressions with search clause and errors out if it does. 
@@ -3948,9 +3940,6 @@ ContainsSearchClauseWalker(Node *node, void *context) } -#endif - - /* * get_all_actual_clauses * diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index ac36842de..b57e37735 100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -2143,8 +2143,8 @@ GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry, * If the restriction involves multiple tables, we cannot add it to * input relation's expression list. */ - Relids varnos = pull_varnos_compat(relationRestriction->plannerInfo, - (Node *) restrictionClause); + Relids varnos = pull_varnos(relationRestriction->plannerInfo, + (Node *) restrictionClause); if (bms_num_members(varnos) != 1) { continue; diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index e51329f22..550095875 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -1536,7 +1536,7 @@ CreateSubscriptions(MultiConnection *sourceConnection, quote_identifier(target->publication->name), quote_identifier(target->replicationSlot->name)); - if (EnableBinaryProtocol && PG_VERSION_NUM >= PG_VERSION_14) + if (EnableBinaryProtocol) { appendStringInfoString(createSubscriptionCommand, ", binary=true)"); } diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index a67e4c878..907d8e73e 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -1215,11 +1215,7 @@ RegisterCitusConfigVariables(void) "Enables communication between nodes using binary protocol when possible"), NULL, &EnableBinaryProtocol, -#if PG_VERSION_NUM >= PG_VERSION_14 true, -#else - 
false, -#endif PGC_USERSET, GUC_STANDARD, NULL, NULL, NULL); diff --git a/src/backend/distributed/test/fake_am.c b/src/backend/distributed/test/fake_am.c index 5a8ede316..1654bf095 100644 --- a/src/backend/distributed/test/fake_am.c +++ b/src/backend/distributed/test/fake_am.c @@ -169,7 +169,6 @@ fake_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, } -#if PG_VERSION_NUM >= PG_VERSION_14 static TransactionId fake_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) @@ -179,20 +178,6 @@ fake_index_delete_tuples(Relation rel, } -#else -static TransactionId -fake_compute_xid_horizon_for_tuples(Relation rel, - ItemPointerData *tids, - int nitems) -{ - elog(ERROR, "fake_compute_xid_horizon_for_tuples not implemented"); - return InvalidTransactionId; -} - - -#endif - - /* ---------------------------------------------------------------------------- * Functions for manipulations of physical tuples for fake AM. * ---------------------------------------------------------------------------- @@ -568,11 +553,7 @@ static const TableAmRoutine fake_methods = { .tuple_get_latest_tid = fake_get_latest_tid, .tuple_tid_valid = fake_tuple_tid_valid, .tuple_satisfies_snapshot = fake_tuple_satisfies_snapshot, -#if PG_VERSION_NUM >= PG_VERSION_14 .index_delete_tuples = fake_index_delete_tuples, -#else - .compute_xid_horizon_for_tuples = fake_compute_xid_horizon_for_tuples, -#endif .relation_set_new_filenode = fake_relation_set_new_filenode, .relation_nontransactional_truncate = fake_relation_nontransactional_truncate, diff --git a/src/backend/distributed/test/xact_stats.c b/src/backend/distributed/test/xact_stats.c index c31a17b7f..87e15aa64 100644 --- a/src/backend/distributed/test/xact_stats.c +++ b/src/backend/distributed/test/xact_stats.c @@ -48,8 +48,8 @@ MemoryContextTotalSpace(MemoryContext context) Size totalSpace = 0; MemoryContextCounters totals = { 0 }; - TopTransactionContext->methods->stats_compat(TopTransactionContext, NULL, NULL, - &totals, true); + 
TopTransactionContext->methods->stats(TopTransactionContext, NULL, NULL, + &totals, true); totalSpace += totals.totalspace; for (MemoryContext child = context->firstchild; diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index fc89fde9a..3e2ea5ca1 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -503,11 +503,7 @@ UserHasPermissionToViewStatsOf(Oid currentUserId, Oid backendOwnedId) } if (is_member_of_role(currentUserId, -#if PG_VERSION_NUM >= PG_VERSION_14 ROLE_PG_READ_ALL_STATS)) -#else - DEFAULT_ROLE_READ_ALL_STATS)) -#endif { return true; } diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c index 8c09160b0..0be4bb2e9 100644 --- a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -664,7 +664,7 @@ IsProcessWaitingForSafeOperations(PGPROC *proc) return false; } - if (pgproc_statusflags_compat(proc) & PROC_IS_AUTOVACUUM) + if (proc->statusFlags & PROC_IS_AUTOVACUUM) { return true; } diff --git a/src/backend/distributed/utils/background_jobs.c b/src/backend/distributed/utils/background_jobs.c index 84ef4229f..2b5ce2dca 100644 --- a/src/backend/distributed/utils/background_jobs.c +++ b/src/backend/distributed/utils/background_jobs.c @@ -1436,13 +1436,11 @@ error_severity(int elevel) break; } -#if PG_VERSION_NUM >= PG_VERSION_14 case WARNING_CLIENT_ONLY: { prefix = gettext_noop("WARNING"); break; } -#endif case ERROR: { diff --git a/src/backend/distributed/utils/citus_clauses.c b/src/backend/distributed/utils/citus_clauses.c index c48239548..82900ea1a 100644 --- a/src/backend/distributed/utils/citus_clauses.c +++ b/src/backend/distributed/utils/citus_clauses.c @@ -528,9 +528,9 @@ FixFunctionArgumentsWalker(Node *expr, void *context) elog(ERROR, "cache lookup failed for function %u", funcExpr->funcid); } - 
funcExpr->args = expand_function_arguments_compat(funcExpr->args, false, - funcExpr->funcresulttype, - func_tuple); + funcExpr->args = expand_function_arguments(funcExpr->args, false, + funcExpr->funcresulttype, + func_tuple); ReleaseSysCache(func_tuple); } diff --git a/src/backend/distributed/utils/enable_ssl.c b/src/backend/distributed/utils/enable_ssl.c index b449fa3e4..cac32f74c 100644 --- a/src/backend/distributed/utils/enable_ssl.c +++ b/src/backend/distributed/utils/enable_ssl.c @@ -19,11 +19,6 @@ * done before including libpq.h. */ #include "distributed/pg_version_constants.h" -#if PG_VERSION_NUM < PG_VERSION_14 -#ifndef OPENSSL_API_COMPAT -#define OPENSSL_API_COMPAT 0x1000100L -#endif -#endif #include "distributed/connection_management.h" #include "distributed/memutils.h" diff --git a/src/backend/distributed/utils/function_utils.c b/src/backend/distributed/utils/function_utils.c index 04750b23f..006e29555 100644 --- a/src/backend/distributed/utils/function_utils.c +++ b/src/backend/distributed/utils/function_utils.c @@ -46,7 +46,7 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume const bool findVariadics = false; const bool findDefaults = false; - FuncCandidateList functionList = FuncnameGetCandidates_compat( + FuncCandidateList functionList = FuncnameGetCandidates( qualifiedFunctionNameList, argumentCount, argumentList, diff --git a/src/backend/distributed/utils/listutils.c b/src/backend/distributed/utils/listutils.c index 3279193ef..dd54443c4 100644 --- a/src/backend/distributed/utils/listutils.c +++ b/src/backend/distributed/utils/listutils.c @@ -118,9 +118,7 @@ ListToHashSet(List *itemList, Size keySize, bool isStringList) if (isStringList) { -#if PG_VERSION_NUM >= PG_VERSION_14 flags |= HASH_STRINGS; -#endif } else { diff --git a/src/backend/distributed/utils/log_utils.c b/src/backend/distributed/utils/log_utils.c index ed463c40a..59a090a16 100644 --- a/src/backend/distributed/utils/log_utils.c +++ 
b/src/backend/distributed/utils/log_utils.c @@ -18,9 +18,7 @@ #include "utils/builtins.h" -#if PG_VERSION_NUM >= PG_VERSION_14 #include "common/cryptohash.h" -#endif /* diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index c5fcd2377..ab36483fd 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -1023,7 +1023,7 @@ IsParentTable(Oid relationId) Oid PartitionParentOid(Oid partitionOid) { - Oid partitionParentOid = get_partition_parent_compat(partitionOid, false); + Oid partitionParentOid = get_partition_parent(partitionOid, false); return partitionParentOid; } @@ -1074,7 +1074,7 @@ PartitionList(Oid parentRelationId) ereport(ERROR, (errmsg("\"%s\" is not a parent table", relationName))); } - PartitionDesc partDesc = RelationGetPartitionDesc_compat(rel, true); + PartitionDesc partDesc = RelationGetPartitionDesc(rel, true); Assert(partDesc != NULL); int partitionCount = partDesc->nparts; @@ -1107,7 +1107,7 @@ GenerateDetachPartitionCommand(Oid partitionTableId) ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName))); } - Oid parentId = get_partition_parent_compat(partitionTableId, false); + Oid parentId = get_partition_parent(partitionTableId, false); char *tableQualifiedName = generate_qualified_relation_name(partitionTableId); char *parentTableQualifiedName = generate_qualified_relation_name(parentId); @@ -1221,7 +1221,7 @@ GenerateAlterTableAttachPartitionCommand(Oid partitionTableId) ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName))); } - Oid parentId = get_partition_parent_compat(partitionTableId, false); + Oid parentId = get_partition_parent(partitionTableId, false); char *tableQualifiedName = generate_qualified_relation_name(partitionTableId); char *parentTableQualifiedName = generate_qualified_relation_name(parentId); diff --git 
a/src/include/columnar/columnar_version_compat.h b/src/include/columnar/columnar_version_compat.h index c40aa6236..0e0ae3112 100644 --- a/src/include/columnar/columnar_version_compat.h +++ b/src/include/columnar/columnar_version_compat.h @@ -22,29 +22,6 @@ ExecARDeleteTriggers(a, b, c, d, e) #endif -#if PG_VERSION_NUM >= PG_VERSION_14 -#define ColumnarProcessUtility_compat(a, b, c, d, e, f, g, h) \ - ColumnarProcessUtility(a, b, c, d, e, f, g, h) -#define PrevProcessUtilityHook_compat(a, b, c, d, e, f, g, h) \ - PrevProcessUtilityHook(a, b, c, d, e, f, g, h) -#define GetOldestNonRemovableTransactionId_compat(a, b) \ - GetOldestNonRemovableTransactionId(a) -#define ExecSimpleRelationInsert_compat(a, b, c) \ - ExecSimpleRelationInsert(a, b, c) -#define index_insert_compat(a, b, c, d, e, f, g, h) \ - index_insert(a, b, c, d, e, f, g, h) -#else -#define ColumnarProcessUtility_compat(a, b, c, d, e, f, g, h) \ - ColumnarProcessUtility(a, b, d, e, f, g, h) -#define PrevProcessUtilityHook_compat(a, b, c, d, e, f, g, h) \ - PrevProcessUtilityHook(a, b, d, e, f, g, h) -#define GetOldestNonRemovableTransactionId_compat(a, b) GetOldestXmin(a, b) -#define ExecSimpleRelationInsert_compat(a, b, c) \ - ExecSimpleRelationInsert(b, c) -#define index_insert_compat(a, b, c, d, e, f, g, h) \ - index_insert(a, b, c, d, e, f, h) -#endif - #define ACLCHECK_OBJECT_TABLE OBJECT_TABLE #define ExplainPropertyLong(qlabel, value, es) \ diff --git a/src/include/distributed/commands/multi_copy.h b/src/include/distributed/commands/multi_copy.h index 70f93cfb9..4255c952d 100644 --- a/src/include/distributed/commands/multi_copy.h +++ b/src/include/distributed/commands/multi_copy.h @@ -31,12 +31,7 @@ typedef enum CitusCopyDest { COPY_FILE, /* to/from file (or a piped program) */ -#if PG_VERSION_NUM >= PG_VERSION_14 COPY_FRONTEND, /* to frontend */ -#else - COPY_OLD_FE, /* to/from frontend (2.0 protocol) */ - COPY_NEW_FE, /* to/from frontend (3.0 protocol) */ -#endif COPY_CALLBACK /* to/from callback 
function */ } CitusCopyDest; diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 7229f7c72..f02f83fe3 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -79,9 +79,7 @@ typedef struct DDLJob extern ProcessUtility_hook_type PrevProcessUtility; extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString, -#if PG_VERSION_NUM >= PG_VERSION_14 bool readOnlyTree, -#endif ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *completionTag diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index 96bed3457..f08124123 100644 --- a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -353,7 +353,4 @@ extern bool CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, extern double MillisecondsPassedSince(instr_time moment); extern long MillisecondsToTimeout(instr_time start, long msAfterStart); -#if PG_VERSION_NUM < 140000 -extern void WarmUpConnParamsHash(void); -#endif #endif /* CONNECTION_MANAGMENT_H */ diff --git a/src/include/distributed/pg_version_constants.h b/src/include/distributed/pg_version_constants.h index 83b1071dd..a85d72d84 100644 --- a/src/include/distributed/pg_version_constants.h +++ b/src/include/distributed/pg_version_constants.h @@ -11,7 +11,6 @@ #ifndef PG_VERSION_CONSTANTS #define PG_VERSION_CONSTANTS -#define PG_VERSION_13 130000 #define PG_VERSION_14 140000 #define PG_VERSION_15 150000 #define PG_VERSION_16 160000 diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index eb81bca43..00c5e286b 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -61,8 +61,7 @@ pg_strtoint64(char *s) * We want to use it in all versions. 
So we backport it ourselves in earlier * versions, and rely on the Postgres provided version in the later versions. */ -#if PG_VERSION_NUM >= PG_VERSION_13 && PG_VERSION_NUM < 130010 \ - || PG_VERSION_NUM >= PG_VERSION_14 && PG_VERSION_NUM < 140007 +#if PG_VERSION_NUM < 140007 static inline SMgrRelation RelationGetSmgr(Relation rel) { @@ -84,67 +83,6 @@ RelationGetSmgr(Relation rel) #endif -#if PG_VERSION_NUM >= PG_VERSION_14 -#define AlterTableStmtObjType_compat(a) ((a)->objtype) -#define getObjectTypeDescription_compat(a, b) getObjectTypeDescription(a, b) -#define getObjectIdentity_compat(a, b) getObjectIdentity(a, b) - -/* for MemoryContextMethods->stats */ -#define stats_compat(a, b, c, d, e) stats(a, b, c, d, e) -#define FuncnameGetCandidates_compat(a, b, c, d, e, f, g) \ - FuncnameGetCandidates(a, b, c, d, e, f, g) -#define expand_function_arguments_compat(a, b, c, d) expand_function_arguments(a, b, c, d) -#define BeginCopyFrom_compat(a, b, c, d, e, f, g, h) BeginCopyFrom(a, b, c, d, e, f, g, h) -#define standard_ProcessUtility_compat(a, b, c, d, e, f, g, h) \ - standard_ProcessUtility(a, b, c, d, e, f, g, h) -#define ProcessUtility_compat(a, b, c, d, e, f, g, h) \ - ProcessUtility(a, b, c, d, e, f, g, h) -#define PrevProcessUtility_compat(a, b, c, d, e, f, g, h) \ - PrevProcessUtility(a, b, c, d, e, f, g, h) -#define SetTuplestoreDestReceiverParams_compat(a, b, c, d, e, f) \ - SetTuplestoreDestReceiverParams(a, b, c, d, e, f) -#define pgproc_statusflags_compat(pgproc) ((pgproc)->statusFlags) -#define get_partition_parent_compat(a, b) get_partition_parent(a, b) -#define RelationGetPartitionDesc_compat(a, b) RelationGetPartitionDesc(a, b) -#define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(a, b) -#define pull_varnos_compat(a, b) pull_varnos(a, b) -#else -#define AlterTableStmtObjType_compat(a) ((a)->relkind) -#define F_NEXTVAL F_NEXTVAL_OID -#define ROLE_PG_MONITOR DEFAULT_ROLE_MONITOR -#define PROC_WAIT_STATUS_WAITING STATUS_WAITING 
-#define getObjectTypeDescription_compat(a, b) getObjectTypeDescription(a) -#define getObjectIdentity_compat(a, b) getObjectIdentity(a) - -/* for MemoryContextMethods->stats */ -#define stats_compat(a, b, c, d, e) stats(a, b, c, d) -#define FuncnameGetCandidates_compat(a, b, c, d, e, f, g) \ - FuncnameGetCandidates(a, b, c, d, e, g) -#define expand_function_arguments_compat(a, b, c, d) expand_function_arguments(a, c, d) -#define VacOptValue VacOptTernaryValue -#define VACOPTVALUE_UNSPECIFIED VACOPT_TERNARY_DEFAULT -#define VACOPTVALUE_DISABLED VACOPT_TERNARY_DISABLED -#define VACOPTVALUE_ENABLED VACOPT_TERNARY_ENABLED -#define CopyFromState CopyState -#define BeginCopyFrom_compat(a, b, c, d, e, f, g, h) BeginCopyFrom(a, b, d, e, f, g, h) -#define standard_ProcessUtility_compat(a, b, c, d, e, f, g, h) \ - standard_ProcessUtility(a, b, d, e, f, g, h) -#define ProcessUtility_compat(a, b, c, d, e, f, g, h) ProcessUtility(a, b, d, e, f, g, h) -#define PrevProcessUtility_compat(a, b, c, d, e, f, g, h) \ - PrevProcessUtility(a, b, d, e, f, g, h) -#define COPY_FRONTEND COPY_NEW_FE -#define SetTuplestoreDestReceiverParams_compat(a, b, c, d, e, f) \ - SetTuplestoreDestReceiverParams(a, b, c, d) -#define pgproc_statusflags_compat(pgproc) \ - ((&ProcGlobal->allPgXact[(pgproc)->pgprocno])->vacuumFlags) -#define get_partition_parent_compat(a, b) get_partition_parent(a) -#define RelationGetPartitionDesc_compat(a, b) RelationGetPartitionDesc(a) -#define PQ_LARGE_MESSAGE_LIMIT 0 -#define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(b) -#define pull_varnos_compat(a, b) pull_varnos(b) -#define ROLE_PG_READ_ALL_STATS DEFAULT_ROLE_READ_ALL_STATS -#endif - #define SetListCellPtr(a, b) ((a)->ptr_value = (b)) #define RangeTableEntryFromNSItem(a) ((a)->p_rte) #define fcGetArgValue(fc, n) ((fc)->args[n].value) diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 0370f4e98..a374c73f5 100644 --- a/src/test/regress/bin/normalize.sed +++ 
b/src/test/regress/bin/normalize.sed @@ -98,34 +98,7 @@ s/of relation ".*" violates not-null constraint/violates not-null constraint/g s/partition ".*" would be violated by some row/partition would be violated by some row/g s/of relation ".*" contains null values/contains null values/g -#if (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) -# (This is not preprocessor directive, but a reminder for the developer that will drop PG13 support ) -# libpq message changes for minor versions of pg13 - -# We ignore multiline error messages, and substitute first line with a single line -# alternative that is used in some older libpq versions. -s/(ERROR: |WARNING: |error:) server closed the connection unexpectedly/\1 connection not open/g -/^\s*This probably means the server terminated abnormally$/d -/^\s*before or while processing the request.$/d -/^\s*connection not open$/d - -s/ERROR: fake_fetch_row_version not implemented/ERROR: fake_tuple_update not implemented/g -s/ERROR: COMMIT is not allowed in an SQL function/ERROR: COMMIT is not allowed in a SQL function/g -s/ERROR: ROLLBACK is not allowed in an SQL function/ERROR: ROLLBACK is not allowed in a SQL function/g -/.*Async-Capable.*/d -/.*Async Capable.*/d -/Parent Relationship/d -/Parent-Relationship/d -s/function array_cat_agg\(anyarray\) anyarray/function array_cat_agg\(anycompatiblearray\) anycompatiblearray/g -s/function array_cat_agg\(anyarray\)/function array_cat_agg\(anycompatiblearray\)/g -s/TRIM\(BOTH FROM value\)/btrim\(value\)/g -/DETAIL: Subqueries are not supported in policies on distributed tables/d -s/ERROR: unexpected non-SELECT command in SubLink/ERROR: cannot create policy/g - -# PG13 changes bgworker sigterm message, we can drop that line with PG13 drop -s/(FATAL: terminating).*Citus Background Task Queue Executor.*(due to administrator command)\+/\1 connection \2 \+/g - -#endif /* (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) */ +s/(Citus Background Task 
Queue Executor: regression\/postgres for \()[0-9]+\/[0-9]+\)/\1xxxxx\/xxxxx\)/g # Changed outputs after minor bump to PG14.5 and PG13.8 s/(ERROR: |WARNING: |error:) invalid socket/\1 connection not open/g @@ -135,9 +108,18 @@ s/(ERROR: |WARNING: |error:) invalid socket/\1 connection not open/g # pg15 changes # can be removed when dropping PG13&14 support +#if (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM < PG_VERSION_15) +# (This is not a preprocessor directive, but a reminder for the developer that will drop PG14 support ) s/is not a PostgreSQL server process/is not a PostgreSQL backend process/g s/ AS "\?column\?"//g s/".*\.(.*)": (found .* removable)/"\1": \2/g +# We ignore multiline error messages, and substitute first line with a single line +# alternative that is used in some older libpq versions. +s/(ERROR: |WARNING: |error:) server closed the connection unexpectedly/\1 connection not open/g +/^\s*This probably means the server terminated abnormally$/d +/^\s*before or while processing the request.$/d +/^\s*connection not open$/d +#endif /* (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM < PG_VERSION_15) */ # intermediate_results s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index b41ba35cc..8e1e1c91e 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -166,6 +166,7 @@ DEPS = { "multi_table_ddl", ], ), + "grant_on_schema_propagation": TestDeps("minimal_schedule"), } diff --git a/src/test/regress/expected/background_task_queue_monitor.out b/src/test/regress/expected/background_task_queue_monitor.out index 2b4f7de37..1d4006377 100644 --- a/src/test/regress/expected/background_task_queue_monitor.out +++ b/src/test/regress/expected/background_task_queue_monitor.out @@ -495,11 +495,11 @@ SELECT task_id, status, retry_count, message FROM
pg_dist_background_task ORDER BY task_id; -- show that all tasks are runnable by retry policy after termination signal task_id | status | retry_count | message --------------------------------------------------------------------- - 1450019 | runnable | 1 | FATAL: terminating connection due to administrator command + - | | | CONTEXT: Citus Background Task Queue Executor: regression/postgres for (1450011/1450019) + + 1450019 | runnable | 1 | FATAL: terminating background worker "Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx)" due to administrator command+ + | | | CONTEXT: Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx) + | | | - 1450020 | runnable | 1 | FATAL: terminating connection due to administrator command + - | | | CONTEXT: Citus Background Task Queue Executor: regression/postgres for (1450012/1450020) + + 1450020 | runnable | 1 | FATAL: terminating background worker "Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx)" due to administrator command+ + | | | CONTEXT: Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx) + | | | (2 rows) diff --git a/src/test/regress/expected/cpu_priority.out b/src/test/regress/expected/cpu_priority.out index ad05e09f5..04bd8e0f4 100644 --- a/src/test/regress/expected/cpu_priority.out +++ b/src/test/regress/expected/cpu_priority.out @@ -85,17 +85,14 @@ SET search_path TO cpu_priority; -- in their CREATE SUBSCRIPTION commands. SET citus.log_remote_commands TO ON; SET citus.grep_remote_commands = '%CREATE SUBSCRIPTION%'; --- We disable binary protocol, so we have consistent output between PG13 and --- PG14, beacuse PG13 doesn't support binary logical replication. 
-SET citus.enable_binary_protocol = false; SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' 
port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx master_move_shard_placement --------------------------------------------------------------------- @@ -104,13 +101,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SET citus.cpu_priority_for_logical_replication_senders = 15; SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); -NOTICE: issuing CREATE 
SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, 
copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx master_move_shard_placement --------------------------------------------------------------------- @@ -119,13 +116,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SET citus.max_high_priority_background_processes = 3; SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION 
citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 
'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx master_move_shard_placement --------------------------------------------------------------------- @@ -145,21 +142,21 @@ SELECT pg_catalog.citus_split_shard_by_split_points( ARRAY['-1500000000'], ARRAY[:worker_1_node, :worker_2_node], 'force_logical'); -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx 
user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, 
binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION 
citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx) +NOTICE: issuing CREATE SUBSCRIPTION 
citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx citus_split_shard_by_split_points --------------------------------------------------------------------- diff --git a/src/test/regress/expected/generated_identity.out b/src/test/regress/expected/generated_identity.out index 617a8fee6..8fe7a0dc6 100644 --- a/src/test/regress/expected/generated_identity.out +++ b/src/test/regress/expected/generated_identity.out @@ -1,11 +1,3 @@ --- This test file has an alternative output because of error messages vary for PG13 -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13; - server_version_le_13 ---------------------------------------------------------------------- - f -(1 row) - CREATE SCHEMA generated_identities; SET search_path TO generated_identities; SET client_min_messages to ERROR; diff --git a/src/test/regress/expected/generated_identity_0.out b/src/test/regress/expected/generated_identity_0.out deleted file mode 100644 index 1bff7f68f..000000000 --- a/src/test/regress/expected/generated_identity_0.out +++ /dev/null @@ -1,431 +0,0 @@ --- This test file has an alternative output because of error messages vary for PG13 -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13; - server_version_le_13 ---------------------------------------------------------------------- - t -(1 row) - -CREATE SCHEMA generated_identities; -SET search_path TO generated_identities; -SET client_min_messages to ERROR; -SET citus.shard_replication_factor TO 1; -SELECT 1 from citus_add_node('localhost', :master_port, 
groupId=>0); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - --- smallint identity column can not be distributed -CREATE TABLE smallint_identity_column ( - a smallint GENERATED BY DEFAULT AS IDENTITY -); -SELECT create_distributed_table('smallint_identity_column', 'a'); -ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column -HINT: Use bigint identity column instead. -SELECT create_distributed_table_concurrently('smallint_identity_column', 'a'); -ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column -HINT: Use bigint identity column instead. -SELECT create_reference_table('smallint_identity_column'); -ERROR: cannot complete operation on a table with identity column -SELECT citus_add_local_table_to_metadata('smallint_identity_column'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE smallint_identity_column; --- int identity column can not be distributed -CREATE TABLE int_identity_column ( - a int GENERATED BY DEFAULT AS IDENTITY -); -SELECT create_distributed_table('int_identity_column', 'a'); -ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column -HINT: Use bigint identity column instead. -SELECT create_distributed_table_concurrently('int_identity_column', 'a'); -ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column -HINT: Use bigint identity column instead. 
-SELECT create_reference_table('int_identity_column'); -ERROR: cannot complete operation on a table with identity column -SELECT citus_add_local_table_to_metadata('int_identity_column'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE int_identity_column; -RESET citus.shard_replication_factor; -CREATE TABLE bigint_identity_column ( - a bigint GENERATED BY DEFAULT AS IDENTITY, - b int -); -SELECT citus_add_local_table_to_metadata('bigint_identity_column'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE bigint_identity_column; -CREATE TABLE bigint_identity_column ( - a bigint GENERATED BY DEFAULT AS IDENTITY, - b int -); -SELECT create_distributed_table('bigint_identity_column', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -\d bigint_identity_column - Table "generated_identities.bigint_identity_column" - Column | Type | Collation | Nullable | Default ---------------------------------------------------------------------- - a | bigint | | not null | generated by default as identity - b | integer | | | - -\c - - - :worker_1_port -SET search_path TO generated_identities; -SET client_min_messages to ERROR; -INSERT INTO bigint_identity_column (b) -SELECT s FROM generate_series(1,10) s; -\d generated_identities.bigint_identity_column - Table "generated_identities.bigint_identity_column" - Column | Type | Collation | Nullable | Default ---------------------------------------------------------------------- - a | bigint | | not null | generated by default as identity - b | integer | | | - -\c - - - :master_port -SET search_path TO generated_identities; -SET client_min_messages to ERROR; -INSERT INTO bigint_identity_column (b) -SELECT s FROM generate_series(11,20) s; -SELECT * FROM bigint_identity_column ORDER BY B ASC; - a | b 
---------------------------------------------------------------------- - 3940649673949185 | 1 - 3940649673949186 | 2 - 3940649673949187 | 3 - 3940649673949188 | 4 - 3940649673949189 | 5 - 3940649673949190 | 6 - 3940649673949191 | 7 - 3940649673949192 | 8 - 3940649673949193 | 9 - 3940649673949194 | 10 - 1 | 11 - 2 | 12 - 3 | 13 - 4 | 14 - 5 | 15 - 6 | 16 - 7 | 17 - 8 | 18 - 9 | 19 - 10 | 20 -(20 rows) - --- table with identity column cannot be altered. -SELECT alter_distributed_table('bigint_identity_column', 'b'); -ERROR: cannot complete operation on a table with identity column --- table with identity column cannot be undistributed. -SELECT undistribute_table('bigint_identity_column'); -ERROR: cannot complete operation on a table with identity column -DROP TABLE bigint_identity_column; --- create a partitioned table for testing. -CREATE TABLE partitioned_table ( - a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10), - b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10), - c int -) -PARTITION BY RANGE (c); -CREATE TABLE partitioned_table_1_50 PARTITION OF partitioned_table FOR VALUES FROM (1) TO (50); -CREATE TABLE partitioned_table_50_500 PARTITION OF partitioned_table FOR VALUES FROM (50) TO (1000); -SELECT create_distributed_table('partitioned_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -\d partitioned_table - Partitioned table "generated_identities.partitioned_table" - Column | Type | Collation | Nullable | Default ---------------------------------------------------------------------- - a | bigint | | not null | generated by default as identity - b | bigint | | not null | generated always as identity - c | integer | | | -Partition key: RANGE (c) -Number of partitions: 2 (Use \d+ to list them.) 
- -\c - - - :worker_1_port -SET search_path TO generated_identities; -SET client_min_messages to ERROR; -\d generated_identities.partitioned_table - Partitioned table "generated_identities.partitioned_table" - Column | Type | Collation | Nullable | Default ---------------------------------------------------------------------- - a | bigint | | not null | generated by default as identity - b | bigint | | not null | generated always as identity - c | integer | | | -Partition key: RANGE (c) -Number of partitions: 2 (Use \d+ to list them.) - -insert into partitioned_table (c) values (1); -insert into partitioned_table (c) SELECT 2; -INSERT INTO partitioned_table (c) -SELECT s FROM generate_series(3,7) s; -\c - - - :master_port -SET search_path TO generated_identities; -SET client_min_messages to ERROR; -INSERT INTO partitioned_table (c) -SELECT s FROM generate_series(10,20) s; -INSERT INTO partitioned_table (a,c) VALUES (998,998); -INSERT INTO partitioned_table (a,b,c) OVERRIDING SYSTEM VALUE VALUES (999,999,999); -SELECT * FROM partitioned_table ORDER BY c ASC; - a | b | c ---------------------------------------------------------------------- - 3940649673949185 | 3940649673949185 | 1 - 3940649673949195 | 3940649673949195 | 2 - 3940649673949205 | 3940649673949205 | 3 - 3940649673949215 | 3940649673949215 | 4 - 3940649673949225 | 3940649673949225 | 5 - 3940649673949235 | 3940649673949235 | 6 - 3940649673949245 | 3940649673949245 | 7 - 10 | 10 | 10 - 20 | 20 | 11 - 30 | 30 | 12 - 40 | 40 | 13 - 50 | 50 | 14 - 60 | 60 | 15 - 70 | 70 | 16 - 80 | 80 | 17 - 90 | 90 | 18 - 100 | 100 | 19 - 110 | 110 | 20 - 998 | 120 | 998 - 999 | 999 | 999 -(20 rows) - --- alter table .. alter column .. 
add is unsupported -ALTER TABLE partitioned_table ALTER COLUMN g ADD GENERATED ALWAYS AS IDENTITY; -ERROR: alter table command is currently unsupported -DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. --- alter table .. alter column is unsupported -ALTER TABLE partitioned_table ALTER COLUMN b TYPE int; -ERROR: cannot execute ALTER COLUMN command involving identity column -DROP TABLE partitioned_table; --- create a table for reference table testing. -CREATE TABLE reference_table ( - a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10), - b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10) UNIQUE, - c int -); -SELECT create_reference_table('reference_table'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -\d reference_table - Table "generated_identities.reference_table" - Column | Type | Collation | Nullable | Default ---------------------------------------------------------------------- - a | bigint | | not null | generated by default as identity - b | bigint | | not null | generated always as identity - c | integer | | | -Indexes: - "reference_table_b_key" UNIQUE CONSTRAINT, btree (b) - -\c - - - :worker_1_port -SET search_path TO generated_identities; -\d generated_identities.reference_table - Table "generated_identities.reference_table" - Column | Type | Collation | Nullable | Default ---------------------------------------------------------------------- - a | bigint | | not null | generated by default as identity - b | bigint | | not null | generated always as identity - c | integer | | | -Indexes: - "reference_table_b_key" UNIQUE CONSTRAINT, btree (b) - -INSERT INTO reference_table (c) -SELECT s FROM generate_series(1,10) s; ---on master -select * from 
reference_table; - a | b | c ---------------------------------------------------------------------- - 3940649673949185 | 3940649673949185 | 1 - 3940649673949195 | 3940649673949195 | 2 - 3940649673949205 | 3940649673949205 | 3 - 3940649673949215 | 3940649673949215 | 4 - 3940649673949225 | 3940649673949225 | 5 - 3940649673949235 | 3940649673949235 | 6 - 3940649673949245 | 3940649673949245 | 7 - 3940649673949255 | 3940649673949255 | 8 - 3940649673949265 | 3940649673949265 | 9 - 3940649673949275 | 3940649673949275 | 10 -(10 rows) - -\c - - - :master_port -SET search_path TO generated_identities; -SET client_min_messages to ERROR; -INSERT INTO reference_table (c) -SELECT s FROM generate_series(11,20) s; -SELECT * FROM reference_table ORDER BY c ASC; - a | b | c ---------------------------------------------------------------------- - 3940649673949185 | 3940649673949185 | 1 - 3940649673949195 | 3940649673949195 | 2 - 3940649673949205 | 3940649673949205 | 3 - 3940649673949215 | 3940649673949215 | 4 - 3940649673949225 | 3940649673949225 | 5 - 3940649673949235 | 3940649673949235 | 6 - 3940649673949245 | 3940649673949245 | 7 - 3940649673949255 | 3940649673949255 | 8 - 3940649673949265 | 3940649673949265 | 9 - 3940649673949275 | 3940649673949275 | 10 - 10 | 10 | 11 - 20 | 20 | 12 - 30 | 30 | 13 - 40 | 40 | 14 - 50 | 50 | 15 - 60 | 60 | 16 - 70 | 70 | 17 - 80 | 80 | 18 - 90 | 90 | 19 - 100 | 100 | 20 -(20 rows) - -DROP TABLE reference_table; -CREATE TABLE color ( - color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE, - color_name VARCHAR NOT NULL -); --- https://github.com/citusdata/citus/issues/6694 -CREATE USER identity_test_user; -GRANT INSERT ON color TO identity_test_user; -GRANT USAGE ON SCHEMA generated_identities TO identity_test_user; -SET ROLE identity_test_user; -SELECT create_distributed_table('color', 'color_id'); -ERROR: must be owner of table color -SET ROLE postgres; -SET citus.shard_replication_factor TO 1; -SELECT create_distributed_table_concurrently('color', 
'color_id'); - create_distributed_table_concurrently ---------------------------------------------------------------------- - -(1 row) - -RESET citus.shard_replication_factor; -\c - identity_test_user - :worker_1_port -SET search_path TO generated_identities; -SET client_min_messages to ERROR; -INSERT INTO color(color_name) VALUES ('Blue'); -\c - postgres - :master_port -SET search_path TO generated_identities; -SET client_min_messages to ERROR; -SET citus.next_shard_id TO 12400000; -DROP TABLE Color; -CREATE TABLE color ( - color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE, - color_name VARCHAR NOT NULL -) USING columnar; -SELECT create_distributed_table('color', 'color_id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO color(color_name) VALUES ('Blue'); -\d+ color - Table "generated_identities.color" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------------------------------------------------------------- - color_id | bigint | | not null | generated always as identity | plain | | - color_name | character varying | | not null | | extended | | -Indexes: - "color_color_id_key" UNIQUE CONSTRAINT, btree (color_id) - -\c - - - :worker_1_port -SET search_path TO generated_identities; -\d+ color - Table "generated_identities.color" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------------------------------------------------------------- - color_id | bigint | | not null | generated always as identity | plain | | - color_name | character varying | | not null | | extended | | -Indexes: - "color_color_id_key" UNIQUE CONSTRAINT, btree (color_id) - -INSERT INTO color(color_name) VALUES ('Red'); --- alter sequence .. restart -ALTER SEQUENCE color_color_id_seq RESTART WITH 1000; -ERROR: Altering a distributed sequence is currently not supported. 
--- override system value -INSERT INTO color(color_id, color_name) VALUES (1, 'Red'); -ERROR: cannot insert into column "color_id" -DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS. -HINT: Use OVERRIDING SYSTEM VALUE to override. -INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red'); -ERROR: cannot insert into column "color_id" -DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS. -HINT: Use OVERRIDING SYSTEM VALUE to override. -INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red'); -ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000" -DETAIL: Key (color_id)=(1) already exists. -CONTEXT: while executing command on localhost:xxxxx --- update null or custom value -UPDATE color SET color_id = NULL; -ERROR: column "color_id" can only be updated to DEFAULT -DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS. -UPDATE color SET color_id = 1; -ERROR: column "color_id" can only be updated to DEFAULT -DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS. -\c - postgres - :master_port -SET search_path TO generated_identities; -SET client_min_messages to ERROR; --- alter table .. add column .. GENERATED .. AS IDENTITY -ALTER TABLE color ADD COLUMN color_id BIGINT GENERATED ALWAYS AS IDENTITY; -ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers --- alter sequence .. restart -ALTER SEQUENCE color_color_id_seq RESTART WITH 1000; -ERROR: Altering a distributed sequence is currently not supported. --- override system value -INSERT INTO color(color_id, color_name) VALUES (1, 'Red'); -ERROR: cannot insert into column "color_id" -DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS. -HINT: Use OVERRIDING SYSTEM VALUE to override. 
-INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red'); -ERROR: cannot insert into column "color_id" -DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS. -HINT: Use OVERRIDING SYSTEM VALUE to override. -INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red'); -ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000" -DETAIL: Key (color_id)=(1) already exists. -CONTEXT: while executing command on localhost:xxxxx --- update null or custom value -UPDATE color SET color_id = NULL; -ERROR: column "color_id" can only be updated to DEFAULT -DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS. -UPDATE color SET color_id = 1; -ERROR: column "color_id" can only be updated to DEFAULT -DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS. -DROP TABLE IF EXISTS test; -CREATE TABLE test (x int, y int, z bigint generated by default as identity); -SELECT create_distributed_table('test', 'x', colocate_with := 'none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO test VALUES (1,2); -INSERT INTO test SELECT x, y FROM test WHERE x = 1; -SELECT * FROM test; - x | y | z ---------------------------------------------------------------------- - 1 | 2 | 1 - 1 | 2 | 2 -(2 rows) - -DROP SCHEMA generated_identities CASCADE; -DROP USER identity_test_user; diff --git a/src/test/regress/expected/grant_on_schema_propagation.out b/src/test/regress/expected/grant_on_schema_propagation.out index 410865d49..77447c2dd 100644 --- a/src/test/regress/expected/grant_on_schema_propagation.out +++ b/src/test/regress/expected/grant_on_schema_propagation.out @@ -1,7 +1,7 @@ -- -- GRANT_ON_SCHEMA_PROPAGATION -- --- this test has different output for PG13/14 compared to PG15 +-- this test has different output for PG14 compared to PG15 -- In PG15, public schema is owned by pg_database_owner role -- 
Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 SHOW server_version \gset @@ -327,6 +327,8 @@ SELECT master_remove_node('localhost', :worker_2_port); (1 row) +-- to avoid different output in PG15 +GRANT CREATE ON SCHEMA public TO public; -- distribute the public schema (it has to be distributed by now but just in case) CREATE TABLE public_schema_table (id INT); SELECT create_distributed_table('public_schema_table', 'id'); diff --git a/src/test/regress/expected/grant_on_schema_propagation_0.out b/src/test/regress/expected/grant_on_schema_propagation_0.out index 6b8b782ca..9806a0dbd 100644 --- a/src/test/regress/expected/grant_on_schema_propagation_0.out +++ b/src/test/regress/expected/grant_on_schema_propagation_0.out @@ -1,7 +1,7 @@ -- -- GRANT_ON_SCHEMA_PROPAGATION -- --- this test has different output for PG13/14 compared to PG15 +-- this test has different output for PG14 compared to PG15 -- In PG15, public schema is owned by pg_database_owner role -- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 SHOW server_version \gset @@ -327,6 +327,8 @@ SELECT master_remove_node('localhost', :worker_2_port); (1 row) +-- to avoid different output in PG15 +GRANT CREATE ON SCHEMA public TO public; -- distribute the public schema (it has to be distributed by now but just in case) CREATE TABLE public_schema_table (id INT); SELECT create_distributed_table('public_schema_table', 'id'); diff --git a/src/test/regress/expected/isolation_master_update_node_1.out b/src/test/regress/expected/isolation_master_update_node_1.out deleted file mode 100644 index 194299c4d..000000000 --- a/src/test/regress/expected/isolation_master_update_node_1.out +++ /dev/null @@ -1,66 +0,0 @@ -Parsed test spec with 2 sessions - -starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-begin: BEGIN; -step s1-insert: INSERT INTO t1 
SELECT generate_series(1, 100); -step s2-begin: BEGIN; -step s2-update-node-1: - -- update a specific node by address - SELECT master_update_node(nodeid, 'localhost', nodeport + 10) - FROM pg_dist_node - WHERE nodename = 'localhost' - AND nodeport = 57637; - -step s1-abort: ABORT; -step s2-update-node-1: <... completed> -master_update_node ---------------------------------------------------------------------- - -(1 row) - -step s2-abort: ABORT; -master_remove_node ---------------------------------------------------------------------- - - -(2 rows) - - -starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-begin: BEGIN; -step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); -step s2-begin: BEGIN; -step s2-update-node-1-force: - -- update a specific node by address (force) - SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100) - FROM pg_dist_node - WHERE nodename = 'localhost' - AND nodeport = 57637; - -step s2-update-node-1-force: <... 
completed> -master_update_node ---------------------------------------------------------------------- - -(1 row) - -step s2-abort: ABORT; -step s1-abort: ABORT; -FATAL: terminating connection due to administrator command -server closed the connection unexpectedly - -master_remove_node ---------------------------------------------------------------------- - - -(2 rows) - diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index f77af42da..e70dc1102 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -1200,7 +1200,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1209,7 +1209,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1218,7 +1218,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM 
local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1227,7 +1227,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE 
((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1236,7 +1236,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT 
TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1245,7 +1245,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1254,7 +1254,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT 
distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1263,7 +1263,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM 
local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 diff --git a/src/test/regress/expected/local_shard_execution_0.out b/src/test/regress/expected/local_shard_execution_0.out index 5350728aa..c7f002cad 100644 --- a/src/test/regress/expected/local_shard_execution_0.out +++ b/src/test/regress/expected/local_shard_execution_0.out @@ -1200,7 +1200,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) 
worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1209,7 +1209,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) 
intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1218,7 +1218,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1227,7 +1227,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key 
OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1236,7 +1236,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double 
precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1245,7 +1245,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1254,7 +1254,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1263,7 +1263,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM 
local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 diff --git a/src/test/regress/expected/local_shard_execution_replicated.out b/src/test/regress/expected/local_shard_execution_replicated.out index 7d36a5559..07da961c2 100644 --- a/src/test/regress/expected/local_shard_execution_replicated.out +++ b/src/test/regress/expected/local_shard_execution_replicated.out @@ -1187,7 +1187,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 
2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1196,7 +1196,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) 
random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1205,7 +1205,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 
'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1214,7 +1214,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1223,7 +1223,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM 
local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1232,7 +1232,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM 
local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1241,7 +1241,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 
'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1250,7 +1250,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 diff --git a/src/test/regress/expected/local_shard_execution_replicated_0.out 
b/src/test/regress/expected/local_shard_execution_replicated_0.out index 759d842fd..c913bf628 100644 --- a/src/test/regress/expected/local_shard_execution_replicated_0.out +++ b/src/test/regress/expected/local_shard_execution_replicated_0.out @@ -1187,7 +1187,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1196,7 +1196,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT 
distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1205,7 +1205,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM 
local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1214,7 +1214,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 
'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1223,7 +1223,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1232,7 +1232,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS 
btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1241,7 +1241,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint 
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 @@ -1250,7 +1250,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR EXECUTE local_prepare_no_param_subquery; NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint -NOTICE: executing the command locally: SELECT 
DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t +NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t btrim --------------------------------------------------------------------- 12 diff --git a/src/test/regress/expected/multi_alter_table_row_level_security.out b/src/test/regress/expected/multi_alter_table_row_level_security.out index 962b037fc..d82f76a25 100644 --- a/src/test/regress/expected/multi_alter_table_row_level_security.out +++ b/src/test/regress/expected/multi_alter_table_row_level_security.out @@ -538,6 +538,7 @@ CREATE POLICY fp_s ON information FOR SELECT -- this attempt for distribution fails because the table has a disallowed expression SELECT create_distributed_table('information', 'group_id'); ERROR: cannot create policy +DETAIL: Subqueries are not supported in policies on distributed tables -- DROP the expression so we can distribute the table DROP POLICY fp_s ON information; SELECT create_distributed_table('information', 'group_id'); @@ -549,7 +550,7 @@ SELECT create_distributed_table('information', 'group_id'); -- Try and create the expression on a distributed table, this should also fail CREATE POLICY fp_s ON information FOR SELECT USING (group_id <= (SELECT group_id FROM users WHERE user_name = current_user)); -ERROR: cannot create policy +ERROR: unexpected non-SELECT command in SubLink -- Clean up test DROP TABLE information, groups, users; SET citus.next_shard_id TO 1810000; diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index f9fc5a164..b3e47474f 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -98,19 +98,24 @@ EXPLAIN 
(COSTS FALSE, FORMAT JSON) "Plan": { "Node Type": "Sort", "Parallel Aware": false, + "Async Capable": false, "Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"], "Plans": [ { "Node Type": "Aggregate", "Strategy": "Hashed", "Partial Mode": "Simple", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Group Key": ["remote_scan.l_quantity"], "Plans": [ { "Node Type": "Custom Scan", + "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Adaptive", "Parallel Aware": false, + "Async Capable": false, "Distributed Query": { "Job": { "Task Count": 2, @@ -126,11 +131,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Strategy": "Hashed", "Partial Mode": "Simple", "Parallel Aware": false, + "Async Capable": false, "Group Key": ["l_quantity"], "Plans": [ { "Node Type": "Seq Scan", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Relation Name": "lineitem_360000", "Alias": "lineitem" } @@ -172,6 +180,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Sort false + false (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)) remote_scan.l_quantity @@ -181,15 +190,19 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Aggregate Hashed Simple + Outer false + false remote_scan.l_quantity Custom Scan + Outer Citus Adaptive false + false 2 @@ -205,13 +218,16 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Hashed Simple false + false l_quantity Seq Scan + Outer false + false lineitem_360000 lineitem @@ -250,6 +266,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Plan: Node Type: "Sort" Parallel Aware: false + Async Capable: false Sort Key: - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))" - "remote_scan.l_quantity" @@ -257,13 +274,17 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Group Key: - 
"remote_scan.l_quantity" Plans: - Node Type: "Custom Scan" + Parent Relationship: "Outer" Custom Plan Provider: "Citus Adaptive" Parallel Aware: false + Async Capable: false Distributed Query: Job: Task Count: 2 @@ -276,11 +297,14 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Strategy: "Hashed" Partial Mode: "Simple" Parallel Aware: false + Async Capable: false Group Key: - "l_quantity" Plans: - Node Type: "Seq Scan" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Relation Name: "lineitem_360000" Alias: "lineitem" @@ -1135,11 +1159,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Strategy": "Plain", "Partial Mode": "Simple", "Parallel Aware": false, + "Async Capable": false, "Plans": [ { "Node Type": "Custom Scan", + "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Adaptive", "Parallel Aware": false, + "Async Capable": false, "Distributed Query": { "Job": { "Task Count": 6, @@ -1191,11 +1218,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Plain Simple false + false Custom Scan + Outer Citus Adaptive false + false 6 @@ -1258,10 +1288,13 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false + Async Capable: false Plans: - Node Type: "Custom Scan" + Parent Relationship: "Outer" Custom Plan Provider: "Citus Adaptive" Parallel Aware: false + Async Capable: false Distributed Query: Job: Task Count: 6 @@ -1684,6 +1717,7 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); "Plan": { + "Node Type": "Result", + "Parallel Aware": false,+ + "Async Capable": false, + "Actual Rows": 1, + "Actual Loops": 1 + }, + @@ -1707,6 +1741,7 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); + Result + false + + false + 1 + 1 + + @@ -1728,6 +1763,7 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze(); - Plan: + Node Type: "Result" + Parallel Aware: false+ + Async Capable: false + Actual Rows: 1 + Actual Loops: 1 + Triggers: @@ -2115,6 +2151,7 @@ EXPLAIN 
(COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT "Node Type": "Custom Scan", "Custom Plan Provider": "Citus Adaptive", "Parallel Aware": false, + "Async Capable": false, "Actual Rows": 0, "Actual Loops": 1, "Distributed Query": { @@ -2131,6 +2168,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT "Node Type": "ModifyTable", "Operation": "Insert", "Parallel Aware": false, + "Async Capable": false, "Relation Name": "explain_pk_570013", "Alias": "citus_table_alias", "Actual Rows": 0, @@ -2138,7 +2176,9 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT "Plans": [ { "Node Type": "Result", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Actual Rows": 1, "Actual Loops": 1 } @@ -2167,6 +2207,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F "Node Type": "Custom Scan", "Custom Plan Provider": "Citus Adaptive", "Parallel Aware": false, + "Async Capable": false, "Actual Rows": 0, "Actual Loops": 1, "Distributed Query": { @@ -2184,6 +2225,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F "Plan": { "Node Type": "Seq Scan", "Parallel Aware": false, + "Async Capable": false, "Relation Name": "explain_pk_570013", "Alias": "explain_pk", "Actual Rows": 0, @@ -2212,6 +2254,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO Custom Scan Citus Adaptive false + false 0 1 @@ -2228,6 +2271,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO ModifyTable Insert false + false explain_pk_570013 citus_table_alias 0 @@ -2235,7 +2279,9 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO Result + Outer false + false 1 1 @@ -2263,6 +2309,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FR Custom Scan Citus Adaptive false + false 0 1 @@ -2280,6 +2327,7 @@ EXPLAIN 
(COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FR Seq Scan false + false explain_pk_570013 explain_pk 0 diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index b74cdc179..e85253031 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -1,7 +1,7 @@ -- -- MULTI_METADATA_SYNC -- --- this test has different output for PG13/14 compared to PG15 +-- this test has different output for PG14 compared to PG15 -- In PG15, public schema is owned by pg_database_owner role -- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 SHOW server_version \gset diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out index 16c319d4e..6e1ba6525 100644 --- a/src/test/regress/expected/multi_metadata_sync_0.out +++ b/src/test/regress/expected/multi_metadata_sync_0.out @@ -1,7 +1,7 @@ -- -- MULTI_METADATA_SYNC -- --- this test has different output for PG13/14 compared to PG15 +-- this test has different output for PG14 compared to PG15 -- In PG15, public schema is owned by pg_database_owner role -- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 SHOW server_version \gset diff --git a/src/test/regress/expected/multi_mx_explain.out b/src/test/regress/expected/multi_mx_explain.out index 1c585a027..beb374d23 100644 --- a/src/test/regress/expected/multi_mx_explain.out +++ b/src/test/regress/expected/multi_mx_explain.out @@ -85,19 +85,24 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Plan": { "Node Type": "Sort", "Parallel Aware": false, + "Async Capable": false, "Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"], "Plans": [ { "Node Type": "Aggregate", "Strategy": "Hashed", "Partial Mode": "Simple", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Group Key": 
["remote_scan.l_quantity"], "Plans": [ { "Node Type": "Custom Scan", + "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Adaptive", "Parallel Aware": false, + "Async Capable": false, "Distributed Query": { "Job": { "Task Count": 16, @@ -113,11 +118,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Strategy": "Hashed", "Partial Mode": "Simple", "Parallel Aware": false, + "Async Capable": false, "Group Key": ["l_quantity"], "Plans": [ { "Node Type": "Seq Scan", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Relation Name": "lineitem_mx_1220052", "Alias": "lineitem_mx" } @@ -153,6 +161,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Sort false + false (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)) remote_scan.l_quantity @@ -162,15 +171,19 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Aggregate Hashed Simple + Outer false + false remote_scan.l_quantity Custom Scan + Outer Citus Adaptive false + false 16 @@ -186,13 +199,16 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Hashed Simple false + false l_quantity Seq Scan + Outer false + false lineitem_mx_1220052 lineitem_mx @@ -224,6 +240,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Plan: Node Type: "Sort" Parallel Aware: false + Async Capable: false Sort Key: - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))" - "remote_scan.l_quantity" @@ -231,13 +248,17 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) - Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Group Key: - "remote_scan.l_quantity" Plans: - Node Type: "Custom Scan" + Parent Relationship: "Outer" Custom Plan Provider: "Citus Adaptive" Parallel Aware: false + Async Capable: false Distributed Query: Job: Task Count: 16 @@ -250,11 +271,14 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Strategy: "Hashed" Partial Mode: "Simple" Parallel Aware: false + Async Capable: false Group Key: - "l_quantity" Plans: - Node Type: "Seq Scan" 
+ Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Relation Name: "lineitem_mx_1220052" Alias: "lineitem_mx" @@ -528,11 +552,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Strategy": "Plain", "Partial Mode": "Simple", "Parallel Aware": false, + "Async Capable": false, "Plans": [ { "Node Type": "Custom Scan", + "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Adaptive", "Parallel Aware": false, + "Async Capable": false, "Distributed Query": { "Job": { "Task Count": 16, @@ -548,34 +575,45 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) "Strategy": "Plain", "Partial Mode": "Simple", "Parallel Aware": false, + "Async Capable": false, "Plans": [ { "Node Type": "Hash Join", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Join Type": "Inner", "Inner Unique": false, "Hash Cond": "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)", "Plans": [ { "Node Type": "Hash Join", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Join Type": "Inner", "Inner Unique": false, "Hash Cond": "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)", "Plans": [ { "Node Type": "Seq Scan", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Relation Name": "supplier_mx_1220087", "Alias": "supplier_mx" }, { "Node Type": "Hash", + "Parent Relationship": "Inner", "Parallel Aware": false, + "Async Capable": false, "Plans": [ { "Node Type": "Seq Scan", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Relation Name": "lineitem_mx_1220052", "Alias": "lineitem_mx" } @@ -585,28 +623,38 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) }, { "Node Type": "Hash", + "Parent Relationship": "Inner", "Parallel Aware": false, + "Async Capable": false, "Plans": [ { "Node Type": "Hash Join", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Join Type": "Inner", "Inner Unique": false, "Hash Cond": "(customer_mx.c_custkey = 
orders_mx.o_custkey)", "Plans": [ { "Node Type": "Seq Scan", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Relation Name": "customer_mx_1220084", "Alias": "customer_mx" }, { "Node Type": "Hash", + "Parent Relationship": "Inner", "Parallel Aware": false, + "Async Capable": false, "Plans": [ { "Node Type": "Seq Scan", + "Parent Relationship": "Outer", "Parallel Aware": false, + "Async Capable": false, "Relation Name": "orders_mx_1220068", "Alias": "orders_mx" } @@ -653,11 +701,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Plain Simple false + false Custom Scan + Outer Citus Adaptive false + false 16 @@ -673,34 +724,45 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Plain Simple false + false Hash Join + Outer false + false Inner false (lineitem_mx.l_orderkey = orders_mx.o_orderkey) Hash Join + Outer false + false Inner false (supplier_mx.s_suppkey = lineitem_mx.l_suppkey) Seq Scan + Outer false + false supplier_mx_1220087 supplier_mx Hash + Inner false + false Seq Scan + Outer false + false lineitem_mx_1220052 lineitem_mx @@ -710,28 +772,38 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Hash + Inner false + false Hash Join + Outer false + false Inner false (customer_mx.c_custkey = orders_mx.o_custkey) Seq Scan + Outer false + false customer_mx_1220084 customer_mx Hash + Inner false + false Seq Scan + Outer false + false orders_mx_1220068 orders_mx @@ -775,10 +847,13 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false + Async Capable: false Plans: - Node Type: "Custom Scan" + Parent Relationship: "Outer" Custom Plan Provider: "Citus Adaptive" Parallel Aware: false + Async Capable: false Distributed Query: Job: Task Count: 16 @@ -791,48 +866,69 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false + Async Capable: false Plans: - Node Type: "Hash Join" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Join Type: "Inner" Inner Unique: 
false Hash Cond: "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)" Plans: - Node Type: "Hash Join" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Join Type: "Inner" Inner Unique: false Hash Cond: "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)" Plans: - Node Type: "Seq Scan" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Relation Name: "supplier_mx_1220087" Alias: "supplier_mx" - Node Type: "Hash" + Parent Relationship: "Inner" Parallel Aware: false + Async Capable: false Plans: - Node Type: "Seq Scan" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Relation Name: "lineitem_mx_1220052" Alias: "lineitem_mx" - Node Type: "Hash" + Parent Relationship: "Inner" Parallel Aware: false + Async Capable: false Plans: - Node Type: "Hash Join" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Join Type: "Inner" Inner Unique: false Hash Cond: "(customer_mx.c_custkey = orders_mx.o_custkey)" Plans: - Node Type: "Seq Scan" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Relation Name: "customer_mx_1220084" Alias: "customer_mx" - Node Type: "Hash" + Parent Relationship: "Inner" Parallel Aware: false + Async Capable: false Plans: - Node Type: "Seq Scan" + Parent Relationship: "Outer" Parallel Aware: false + Async Capable: false Relation Name: "orders_mx_1220068" Alias: "orders_mx" diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out index e4f94c053..8483a2891 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -1,10 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14 -\gset -\if :server_version_ge_14 -\else -\q -\endif create schema pg14; set search_path to pg14; SET citus.shard_replication_factor TO 1; diff --git a/src/test/regress/expected/pg14_0.out b/src/test/regress/expected/pg14_0.out deleted file mode 
100644 index cff095489..000000000 --- a/src/test/regress/expected/pg14_0.out +++ /dev/null @@ -1,6 +0,0 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14 -\gset -\if :server_version_ge_14 -\else -\q diff --git a/src/test/regress/expected/sql_procedure.out b/src/test/regress/expected/sql_procedure.out index 9e3aae5c7..63802b354 100644 --- a/src/test/regress/expected/sql_procedure.out +++ b/src/test/regress/expected/sql_procedure.out @@ -37,7 +37,7 @@ CREATE PROCEDURE test_procedure_commit(tt_id int, tt_org_id int) LANGUAGE SQL AS COMMIT; $$; CALL test_procedure_commit(2,5); -ERROR: COMMIT is not allowed in a SQL function +ERROR: COMMIT is not allowed in an SQL function CONTEXT: SQL function "test_procedure_commit" during startup SELECT * FROM test_table ORDER BY 1, 2; id | org_id @@ -52,7 +52,7 @@ CREATE PROCEDURE test_procedure_rollback(tt_id int, tt_org_id int) LANGUAGE SQL COMMIT; $$; CALL test_procedure_rollback(2,15); -ERROR: ROLLBACK is not allowed in a SQL function +ERROR: ROLLBACK is not allowed in an SQL function CONTEXT: SQL function "test_procedure_rollback" during startup SELECT * FROM test_table ORDER BY 1, 2; id | org_id diff --git a/src/test/regress/expected/stat_statements.out b/src/test/regress/expected/stat_statements.out index 537bb4e9b..a3e2f673f 100644 --- a/src/test/regress/expected/stat_statements.out +++ b/src/test/regress/expected/stat_statements.out @@ -2,12 +2,7 @@ -- stat_statements -- -- tests citus_stat_statements functionality -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14 -\gset -\if :server_version_ge_14 SET compute_query_id = 'on'; -\endif -- check if pg_stat_statements is available SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements'; name @@ -72,11 +67,7 @@ select query, calls from citus_stat_statements(); insert into test values($1) | 1 (1 row) -\if :server_version_ge_14 SET compute_query_id 
= 'off'; -\else -set citus.stat_statements_track = 'none'; -\endif -- for pg >= 14, since compute_query_id is off, this insert -- shouldn't be tracked -- for pg < 14, we disable it explicitly so that we don't need @@ -88,11 +79,7 @@ select query, calls from citus_stat_statements(); insert into test values($1) | 1 (1 row) -\if :server_version_ge_14 SET compute_query_id = 'on'; -\else -RESET citus.stat_statements_track; -\endif SELECT citus_stat_statements_reset(); citus_stat_statements_reset --------------------------------------------------------------------- @@ -646,6 +633,4 @@ CONTEXT: PL/pgSQL function citus_stat_statements() line XX at RAISE -- drop created tables DROP TABLE stat_test_text, stat_test_bigint, stat_test_bigint_other, stat_test_reference; DROP FUNCTION normalize_query_string(text); -\if :server_version_ge_14 SET compute_query_id = 'off'; -\endif diff --git a/src/test/regress/expected/tableam.out b/src/test/regress/expected/tableam.out index 47f4c241d..8e6fe5205 100644 --- a/src/test/regress/expected/tableam.out +++ b/src/test/regress/expected/tableam.out @@ -114,7 +114,7 @@ delete from test_ref; ERROR: fake_tuple_delete not implemented CONTEXT: while executing command on localhost:xxxxx update test_ref set a=2; -ERROR: fake_tuple_update not implemented +ERROR: fake_fetch_row_version not implemented CONTEXT: while executing command on localhost:xxxxx RESET client_min_messages; -- ddl events should include "USING fake_am" diff --git a/src/test/regress/expected/window_functions.out b/src/test/regress/expected/window_functions.out index 6f30a49e3..d4718c4dd 100644 --- a/src/test/regress/expected/window_functions.out +++ b/src/test/regress/expected/window_functions.out @@ -3,8 +3,6 @@ -- =================================================================== -- test top level window functions that are pushdownable -- =================================================================== --- This test file has an alternative output because of use of --- 
incremental sort in some explain outputs in PG13 -- -- a very simple window function with an aggregate and a window function -- distribution column is on the partition by clause diff --git a/src/test/regress/expected/window_functions_0.out b/src/test/regress/expected/window_functions_0.out deleted file mode 100644 index c5a132301..000000000 --- a/src/test/regress/expected/window_functions_0.out +++ /dev/null @@ -1,1657 +0,0 @@ --- --- WINDOW_FUNCTIONS --- =================================================================== --- test top level window functions that are pushdownable --- =================================================================== --- This test file has an alternative output because of use of --- incremental sort in some explain outputs in PG13 --- --- a very simple window function with an aggregate and a window function --- distribution column is on the partition by clause -SELECT - user_id, COUNT(*) OVER (PARTITION BY user_id), - rank() OVER (PARTITION BY user_id) -FROM - users_table -ORDER BY - 1 DESC, 2 DESC, 3 DESC -LIMIT 5; - user_id | count | rank ---------------------------------------------------------------------- - 6 | 10 | 1 - 6 | 10 | 1 - 6 | 10 | 1 - 6 | 10 | 1 - 6 | 10 | 1 -(5 rows) - --- a more complicated window clause, including an aggregate --- in both the window clause and the target entry -SELECT - user_id, avg(avg(value_3)) OVER (PARTITION BY user_id, MIN(value_2)) -FROM - users_table -GROUP BY - 1 -ORDER BY - 2 DESC NULLS LAST, 1 DESC; - user_id | avg ---------------------------------------------------------------------- - 2 | 3 - 4 | 2.82608695652174 - 3 | 2.70588235294118 - 6 | 2.6 - 1 | 2.57142857142857 - 5 | 2.46153846153846 -(6 rows) - --- window clause operates on the results of a subquery -SELECT - user_id, max(value_1) OVER (PARTITION BY user_id, MIN(value_2)) -FROM ( - SELECT - DISTINCT us.user_id, us.value_2, value_1, random() as r1 - FROM - users_table as us, events_table - WHERE - us.user_id = 
events_table.user_id AND event_type IN (1,2) - ORDER BY - user_id, value_2 - ) s -GROUP BY - 1, value_1 -ORDER BY - 2 DESC, 1; - user_id | max ---------------------------------------------------------------------- - 1 | 5 - 3 | 5 - 3 | 5 - 4 | 5 - 5 | 5 - 5 | 5 - 6 | 5 - 6 | 5 - 1 | 4 - 2 | 4 - 3 | 4 - 3 | 4 - 3 | 4 - 4 | 4 - 4 | 4 - 5 | 4 - 5 | 4 - 1 | 3 - 2 | 3 - 2 | 3 - 2 | 3 - 6 | 3 - 2 | 2 - 4 | 2 - 4 | 2 - 4 | 2 - 6 | 2 - 1 | 1 - 3 | 1 - 5 | 1 - 6 | 1 - 5 | 0 -(32 rows) - --- window function operates on the results of --- a join --- we also want to verify that this doesn't crash --- when the logging level is DEBUG4 -SET log_min_messages TO DEBUG4; -SELECT - us.user_id, - SUM(us.value_1) OVER (PARTITION BY us.user_id) -FROM - users_table us - JOIN - events_table ev - ON (us.user_id = ev.user_id) -GROUP BY - 1, - value_1 -ORDER BY - 1, - 2 -LIMIT 5; - user_id | sum ---------------------------------------------------------------------- - 1 | 13 - 1 | 13 - 1 | 13 - 1 | 13 - 2 | 10 -(5 rows) - --- the same query, but this time join with an alias -SELECT - user_id, value_1, SUM(j.value_1) OVER (PARTITION BY j.user_id) -FROM - (users_table us - JOIN - events_table ev - USING (user_id ) - ) j -GROUP BY - user_id, - value_1 -ORDER BY - 3 DESC, 2 DESC, 1 DESC -LIMIT 5; - user_id | value_1 | sum ---------------------------------------------------------------------- - 5 | 5 | 15 - 4 | 5 | 15 - 3 | 5 | 15 - 5 | 4 | 15 - 4 | 4 | 15 -(5 rows) - --- querying views that have window functions should be ok -CREATE VIEW window_view AS -SELECT - DISTINCT user_id, rank() OVER (PARTITION BY user_id ORDER BY value_1) -FROM - users_table -GROUP BY - user_id, value_1 -HAVING count(*) > 1; --- Window function in View works -SELECT * -FROM - window_view -ORDER BY - 2 DESC, 1 -LIMIT 10; - user_id | rank ---------------------------------------------------------------------- - 5 | 6 - 2 | 5 - 4 | 5 - 5 | 5 - 2 | 4 - 3 | 4 - 4 | 4 - 5 | 4 - 6 | 4 - 2 | 3 -(10 rows) - --- the other way 
around also should work fine --- query a view using window functions -CREATE VIEW users_view AS SELECT * FROM users_table; -SELECT - DISTINCT user_id, rank() OVER (PARTITION BY user_id ORDER BY value_1) -FROM - users_view -GROUP BY - user_id, value_1 -HAVING count(*) > 4 -ORDER BY - 2 DESC, 1; - user_id | rank ---------------------------------------------------------------------- - 4 | 2 - 5 | 2 - 2 | 1 - 3 | 1 - 4 | 1 - 5 | 1 -(6 rows) - -DROP VIEW users_view, window_view; --- window functions along with subquery in HAVING -SELECT - user_id, count (user_id) OVER (PARTITION BY user_id) -FROM - users_table -GROUP BY - user_id HAVING avg(value_1) < (SELECT min(k_no) FROM users_ref_test_table) -ORDER BY 1 DESC,2 DESC -LIMIT 1; - user_id | count ---------------------------------------------------------------------- - 6 | 1 -(1 row) - --- window function uses columns from two different tables -SELECT - DISTINCT ON (events_table.user_id, rnk) events_table.user_id, rank() OVER my_win AS rnk -FROM - events_table, users_table -WHERE - users_table.user_id = events_table.user_id -WINDOW - my_win AS (PARTITION BY events_table.user_id, users_table.value_1 ORDER BY events_table.time DESC) -ORDER BY - rnk DESC, 1 DESC -LIMIT 10; - user_id | rnk ---------------------------------------------------------------------- - 3 | 121 - 5 | 118 - 2 | 116 - 3 | 115 - 4 | 113 - 2 | 111 - 5 | 109 - 3 | 109 - 4 | 106 - 2 | 106 -(10 rows) - --- the same query with reference table column is also on the partition by clause -SELECT - DISTINCT ON (events_table.user_id, rnk) events_table.user_id, rank() OVER my_win AS rnk -FROM - events_table, users_ref_test_table uref -WHERE - uref.id = events_table.user_id -WINDOW - my_win AS (PARTITION BY events_table.user_id, uref.k_no ORDER BY events_table.time DESC) -ORDER BY - rnk DESC, 1 DESC -LIMIT 10; - user_id | rnk ---------------------------------------------------------------------- - 2 | 24 - 2 | 23 - 2 | 22 - 3 | 21 - 2 | 21 - 3 | 20 - 2 | 20 - 3 | 19 
- 2 | 19 - 3 | 18 -(10 rows) - --- similar query with no distribution column on the partition by clause -SELECT - DISTINCT ON (events_table.user_id, rnk) events_table.user_id, rank() OVER my_win AS rnk -FROM - events_table, users_ref_test_table uref -WHERE - uref.id = events_table.user_id -WINDOW - my_win AS (PARTITION BY events_table.value_2, uref.k_no ORDER BY events_table.time DESC) -ORDER BY - rnk DESC, 1 DESC -LIMIT 10; - user_id | rnk ---------------------------------------------------------------------- - 3 | 7 - 2 | 7 - 3 | 6 - 2 | 6 - 4 | 5 - 3 | 5 - 2 | 5 - 1 | 5 - 6 | 4 - 5 | 4 -(10 rows) - --- ORDER BY in the window function is an aggregate -SELECT - user_id, rank() OVER my_win as rnk, avg(value_2) as avg_val_2 -FROM - events_table -GROUP BY - user_id, date_trunc('day', time) -WINDOW - my_win AS (PARTITION BY user_id ORDER BY avg(event_type) DESC) -ORDER BY - 3 DESC, 2 DESC, 1 DESC; - user_id | rnk | avg_val_2 ---------------------------------------------------------------------- - 1 | 1 | 3.3750000000000000 - 3 | 2 | 3.1666666666666667 - 5 | 1 | 2.6666666666666667 - 6 | 1 | 2.5000000000000000 - 4 | 1 | 2.5000000000000000 - 2 | 1 | 2.4736842105263158 - 4 | 2 | 2.4000000000000000 - 1 | 2 | 2.1428571428571429 - 5 | 2 | 2.0909090909090909 - 6 | 2 | 2.0000000000000000 - 2 | 2 | 2.0000000000000000 - 3 | 1 | 1.8000000000000000 -(12 rows) - --- lets push the limits of writing complex expressions aling with the window functions -SELECT - COUNT(*) OVER (PARTITION BY user_id, user_id + 1), - rank() OVER (PARTITION BY user_id) as cnt1, - COUNT(*) OVER (PARTITION BY user_id, abs(value_1 - value_2)) as cnt2, - date_trunc('min', lag(time) OVER (PARTITION BY user_id ORDER BY time)) as datee, - rank() OVER my_win as rnnk, - avg(CASE - WHEN user_id > 4 - THEN value_1 - ELSE value_2 - END) FILTER (WHERE user_id > 2) OVER my_win_2 as filtered_count, - sum(user_id * (5.0 / (value_1 + value_2 + 0.1)) * value_3) FILTER (WHERE value_1::text LIKE '%1%') OVER my_win_4 as 
cnt_with_filter_2 -FROM - users_table -WINDOW - my_win AS (PARTITION BY user_id, (value_1%3)::int ORDER BY time DESC), - my_win_2 AS (PARTITION BY user_id, (value_1)::int ORDER BY time DESC), - my_win_3 AS (PARTITION BY user_id, date_trunc('min', time)), - my_win_4 AS (my_win_3 ORDER BY value_2, value_3) -ORDER BY - cnt_with_filter_2 DESC NULLS LAST, filtered_count DESC NULLS LAST, datee DESC NULLS LAST, rnnk DESC, cnt2 DESC, cnt1 DESC, user_id DESC -LIMIT 5; - count | cnt1 | cnt2 | datee | rnnk | filtered_count | cnt_with_filter_2 ---------------------------------------------------------------------- - 23 | 1 | 7 | Thu Nov 23 02:14:00 2017 | 6 | 0.00000000000000000000 | 72.7272727272727 - 10 | 1 | 3 | Wed Nov 22 23:01:00 2017 | 1 | 1.00000000000000000000 | 57.1428571428571 - 17 | 1 | 5 | Wed Nov 22 23:24:00 2017 | 8 | 3.0000000000000000 | 28.5714285714286 - 17 | 1 | 5 | | 10 | 2.6666666666666667 | 28.5714285714286 - 17 | 1 | 5 | Thu Nov 23 00:15:00 2017 | 7 | 3.6666666666666667 | 24.1935483870968 -(5 rows) - --- some tests with GROUP BY along with PARTITION BY -SELECT - user_id, - rank() OVER my_win as my_rank, - avg(avg(event_type)) OVER my_win_2 as avg, - max(time) as mx_time -FROM - events_table -GROUP BY - user_id, - value_2 -WINDOW - my_win AS (PARTITION BY user_id, max(event_type) ORDER BY count(*) DESC), - my_win_2 AS (PARTITION BY user_id, avg(user_id) ORDER BY count(*) DESC) -ORDER BY - avg DESC, - mx_time DESC, - my_rank DESC, - user_id DESC; - user_id | my_rank | avg | mx_time ---------------------------------------------------------------------- - 6 | 1 | 3.0000000000000000 | Thu Nov 23 14:00:13.20013 2017 - 6 | 2 | 3.0000000000000000 | Thu Nov 23 11:16:13.106691 2017 - 6 | 1 | 3.0000000000000000 | Thu Nov 23 07:27:32.822068 2017 - 3 | 1 | 2.9857142857142857 | Thu Nov 23 16:31:56.219594 2017 - 4 | 2 | 2.9555555555555556 | Thu Nov 23 14:19:25.765876 2017 - 4 | 1 | 2.9555555555555556 | Thu Nov 23 08:36:53.871919 2017 - 1 | 4 | 2.8633333333333333 | Wed 
Nov 22 21:06:57.457147 2017 - 1 | 1 | 2.8250000000000000 | Thu Nov 23 21:54:46.924477 2017 - 2 | 2 | 2.7738095238095238 | Thu Nov 23 13:27:37.441959 2017 - 1 | 2 | 2.7722222222222222 | Thu Nov 23 09:23:30.994345 2017 - 3 | 1 | 2.7682539682539682 | Thu Nov 23 01:17:49.040685 2017 - 2 | 1 | 2.7142857142857143 | Thu Nov 23 15:58:49.273421 2017 - 1 | 3 | 2.5791666666666667 | Thu Nov 23 11:09:38.074595 2017 - 3 | 1 | 2.5714285714285714 | Thu Nov 23 16:44:41.903713 2017 - 2 | 1 | 2.5158730158730159 | Thu Nov 23 14:02:47.738901 2017 - 4 | 1 | 2.47777777777777778333 | Thu Nov 23 16:20:33.264457 2017 - 4 | 3 | 2.47777777777777778333 | Thu Nov 23 08:14:18.231273 2017 - 4 | 3 | 2.47777777777777778333 | Thu Nov 23 07:32:45.521278 2017 - 1 | 1 | 2.4000000000000000 | Thu Nov 23 10:23:27.617726 2017 - 2 | 1 | 2.3869047619047619 | Thu Nov 23 17:26:14.563216 2017 - 3 | 1 | 2.3841269841269841 | Thu Nov 23 18:08:26.550729 2017 - 3 | 1 | 2.3841269841269841 | Thu Nov 23 09:38:45.338008 2017 - 3 | 2 | 2.3841269841269841 | Thu Nov 23 06:44:50.887182 2017 - 2 | 2 | 2.3095238095238095 | Thu Nov 23 04:05:16.217731 2017 - 5 | 2 | 2.3000000000000000 | Thu Nov 23 14:28:51.833214 2017 - 5 | 2 | 2.3000000000000000 | Thu Nov 23 14:23:09.889786 2017 - 4 | 1 | 2.2000000000000000 | Thu Nov 23 18:10:21.338399 2017 - 2 | 1 | 2.09126984126984126667 | Thu Nov 23 03:35:04.321504 2017 - 5 | 1 | 2.0000000000000000 | Thu Nov 23 16:11:02.929469 2017 - 5 | 1 | 2.0000000000000000 | Thu Nov 23 14:40:40.467511 2017 - 5 | 1 | 2.0000000000000000 | Thu Nov 23 13:26:45.571108 2017 -(31 rows) - --- test for range and rows mode and different window functions --- mostly to make sure that deparsing works fine -SELECT - user_id, - rank() OVER (PARTITION BY user_id ROWS BETWEEN - UNBOUNDED PRECEDING AND CURRENT ROW), - dense_rank() OVER (PARTITION BY user_id RANGE BETWEEN - UNBOUNDED PRECEDING AND CURRENT ROW), - CUME_DIST() OVER (PARTITION BY user_id RANGE BETWEEN - UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), - 
PERCENT_RANK() OVER (PARTITION BY user_id ORDER BY avg(value_1) RANGE BETWEEN - UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) -FROM - users_table -GROUP BY - 1 -ORDER BY - 4 DESC,3 DESC,2 DESC ,1 DESC; - user_id | rank | dense_rank | cume_dist | percent_rank ---------------------------------------------------------------------- - 6 | 1 | 1 | 1 | 0 - 5 | 1 | 1 | 1 | 0 - 4 | 1 | 1 | 1 | 0 - 3 | 1 | 1 | 1 | 0 - 2 | 1 | 1 | 1 | 0 - 1 | 1 | 1 | 1 | 0 -(6 rows) - --- test exclude supported -SELECT - user_id, - value_1, - array_agg(value_1) OVER (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), - array_agg(value_1) OVER (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE CURRENT ROW) -FROM - users_table -WHERE - user_id > 2 AND user_id < 6 -ORDER BY - user_id, value_1, 3, 4; - user_id | value_1 | array_agg | array_agg ---------------------------------------------------------------------- - 3 | 0 | {0} | - 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} - 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} - 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} - 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} - 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} - 3 | 1 | {0,1,1,1,1,1,1} | {0,1,1,1,1,1} - 3 | 2 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,1,2} - 3 | 2 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,1,2} - 3 | 3 | {0,1,1,1,1,1,1,2,2,3,3,3} | {0,1,1,1,1,1,1,2,2,3,3} - 3 | 3 | {0,1,1,1,1,1,1,2,2,3,3,3} | {0,1,1,1,1,1,1,2,2,3,3} - 3 | 3 | {0,1,1,1,1,1,1,2,2,3,3,3} | {0,1,1,1,1,1,1,2,2,3,3} - 3 | 4 | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4,4} | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4} - 3 | 4 | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4,4} | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4} - 3 | 4 | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4,4} | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4} - 3 | 4 | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4,4} | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4} - 3 | 5 | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4,4,5} | {0,1,1,1,1,1,1,2,2,3,3,3,4,4,4,4} - 4 | 0 | {0,0,0,0} | {0,0,0} - 4 | 0 | {0,0,0,0} | {0,0,0} - 4 | 0 | 
{0,0,0,0} | {0,0,0} - 4 | 0 | {0,0,0,0} | {0,0,0} - 4 | 1 | {0,0,0,0,1} | {0,0,0,0} - 4 | 2 | {0,0,0,0,1,2,2,2} | {0,0,0,0,1,2,2} - 4 | 2 | {0,0,0,0,1,2,2,2} | {0,0,0,0,1,2,2} - 4 | 2 | {0,0,0,0,1,2,2,2} | {0,0,0,0,1,2,2} - 4 | 3 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3} | {0,0,0,0,1,2,2,2,3,3,3,3,3} - 4 | 3 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3} | {0,0,0,0,1,2,2,2,3,3,3,3,3} - 4 | 3 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3} | {0,0,0,0,1,2,2,2,3,3,3,3,3} - 4 | 3 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3} | {0,0,0,0,1,2,2,2,3,3,3,3,3} - 4 | 3 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3} | {0,0,0,0,1,2,2,2,3,3,3,3,3} - 4 | 3 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3} | {0,0,0,0,1,2,2,2,3,3,3,3,3} - 4 | 4 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4} - 4 | 4 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4} - 4 | 4 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4} - 4 | 4 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4} - 4 | 4 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4} - 4 | 4 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4} - 4 | 4 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4} - 4 | 5 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4,5,5} | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4,5} - 4 | 5 | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4,5,5} | {0,0,0,0,1,2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4,5} - 5 | 0 | {0,0} | {0} - 5 | 0 | {0,0} | {0} - 5 | 1 | {0,0,1,1,1} | {0,0,1,1} - 5 | 1 | {0,0,1,1,1} | {0,0,1,1} - 5 | 1 | {0,0,1,1,1} | {0,0,1,1} - 5 | 2 | {0,0,1,1,1,2,2,2,2,2,2} | {0,0,1,1,1,2,2,2,2,2} - 5 | 2 | {0,0,1,1,1,2,2,2,2,2,2} | {0,0,1,1,1,2,2,2,2,2} - 5 | 2 | {0,0,1,1,1,2,2,2,2,2,2} | {0,0,1,1,1,2,2,2,2,2} - 5 | 2 | {0,0,1,1,1,2,2,2,2,2,2} | {0,0,1,1,1,2,2,2,2,2} - 5 | 2 | {0,0,1,1,1,2,2,2,2,2,2} | {0,0,1,1,1,2,2,2,2,2} - 5 | 2 | 
{0,0,1,1,1,2,2,2,2,2,2} | {0,0,1,1,1,2,2,2,2,2} - 5 | 3 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3} - 5 | 3 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3} - 5 | 3 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3} - 5 | 3 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3} - 5 | 3 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3} - 5 | 3 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3} - 5 | 3 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3} - 5 | 3 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3} - 5 | 3 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3} - 5 | 4 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4} - 5 | 4 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4} - 5 | 4 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4} - 5 | 5 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4,5,5,5} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4,5,5} - 5 | 5 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4,5,5,5} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4,5,5} - 5 | 5 | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4,5,5,5} | {0,0,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4,5,5} -(66 rows) - --- test preceding and following on RANGE window -SELECT - user_id, - value_1, - array_agg(value_1) OVER range_window, - array_agg(value_1) OVER range_window_exclude -FROM - users_table -WHERE - user_id > 2 AND user_id < 6 -WINDOW - range_window as (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING), - range_window_exclude as (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN 1 
PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) -ORDER BY - user_id, value_1, 3, 4; - user_id | value_1 | array_agg | array_agg ---------------------------------------------------------------------- - 3 | 0 | {0,1,1,1,1,1,1} | {1,1,1,1,1,1} - 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} - 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} - 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} - 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} - 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} - 3 | 1 | {0,1,1,1,1,1,1,2,2} | {0,1,1,1,1,1,2,2} - 3 | 2 | {1,1,1,1,1,1,2,2,3,3,3} | {1,1,1,1,1,1,2,3,3,3} - 3 | 2 | {1,1,1,1,1,1,2,2,3,3,3} | {1,1,1,1,1,1,2,3,3,3} - 3 | 3 | {2,2,3,3,3,4,4,4,4} | {2,2,3,3,4,4,4,4} - 3 | 3 | {2,2,3,3,3,4,4,4,4} | {2,2,3,3,4,4,4,4} - 3 | 3 | {2,2,3,3,3,4,4,4,4} | {2,2,3,3,4,4,4,4} - 3 | 4 | {3,3,3,4,4,4,4,5} | {3,3,3,4,4,4,5} - 3 | 4 | {3,3,3,4,4,4,4,5} | {3,3,3,4,4,4,5} - 3 | 4 | {3,3,3,4,4,4,4,5} | {3,3,3,4,4,4,5} - 3 | 4 | {3,3,3,4,4,4,4,5} | {3,3,3,4,4,4,5} - 3 | 5 | {4,4,4,4,5} | {4,4,4,4} - 4 | 0 | {0,0,0,0,1} | {0,0,0,1} - 4 | 0 | {0,0,0,0,1} | {0,0,0,1} - 4 | 0 | {0,0,0,0,1} | {0,0,0,1} - 4 | 0 | {0,0,0,0,1} | {0,0,0,1} - 4 | 1 | {0,0,0,0,1,2,2,2} | {0,0,0,0,2,2,2} - 4 | 2 | {1,2,2,2,3,3,3,3,3,3} | {1,2,2,3,3,3,3,3,3} - 4 | 2 | {1,2,2,2,3,3,3,3,3,3} | {1,2,2,3,3,3,3,3,3} - 4 | 2 | {1,2,2,2,3,3,3,3,3,3} | {1,2,2,3,3,3,3,3,3} - 4 | 3 | {2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {2,2,2,3,3,3,3,3,4,4,4,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {2,2,2,3,3,3,3,3,4,4,4,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {2,2,2,3,3,3,3,3,4,4,4,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {2,2,2,3,3,3,3,3,4,4,4,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {2,2,2,3,3,3,3,3,4,4,4,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,4,4,4,4,4,4,4} | {2,2,2,3,3,3,3,3,4,4,4,4,4,4,4} - 4 | 4 | {3,3,3,3,3,3,4,4,4,4,4,4,4,5,5} | {3,3,3,3,3,3,4,4,4,4,4,4,5,5} - 4 | 4 | {3,3,3,3,3,3,4,4,4,4,4,4,4,5,5} | {3,3,3,3,3,3,4,4,4,4,4,4,5,5} - 4 | 4 | 
{3,3,3,3,3,3,4,4,4,4,4,4,4,5,5} | {3,3,3,3,3,3,4,4,4,4,4,4,5,5} - 4 | 4 | {3,3,3,3,3,3,4,4,4,4,4,4,4,5,5} | {3,3,3,3,3,3,4,4,4,4,4,4,5,5} - 4 | 4 | {3,3,3,3,3,3,4,4,4,4,4,4,4,5,5} | {3,3,3,3,3,3,4,4,4,4,4,4,5,5} - 4 | 4 | {3,3,3,3,3,3,4,4,4,4,4,4,4,5,5} | {3,3,3,3,3,3,4,4,4,4,4,4,5,5} - 4 | 4 | {3,3,3,3,3,3,4,4,4,4,4,4,4,5,5} | {3,3,3,3,3,3,4,4,4,4,4,4,5,5} - 4 | 5 | {4,4,4,4,4,4,4,5,5} | {4,4,4,4,4,4,4,5} - 4 | 5 | {4,4,4,4,4,4,4,5,5} | {4,4,4,4,4,4,4,5} - 5 | 0 | {0,0,1,1,1} | {0,1,1,1} - 5 | 0 | {0,0,1,1,1} | {0,1,1,1} - 5 | 1 | {0,0,1,1,1,2,2,2,2,2,2} | {0,0,1,1,2,2,2,2,2,2} - 5 | 1 | {0,0,1,1,1,2,2,2,2,2,2} | {0,0,1,1,2,2,2,2,2,2} - 5 | 1 | {0,0,1,1,1,2,2,2,2,2,2} | {0,0,1,1,2,2,2,2,2,2} - 5 | 2 | {1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {1,1,1,2,2,2,2,2,3,3,3,3,3,3,3,3,3} - 5 | 2 | {1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {1,1,1,2,2,2,2,2,3,3,3,3,3,3,3,3,3} - 5 | 2 | {1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {1,1,1,2,2,2,2,2,3,3,3,3,3,3,3,3,3} - 5 | 2 | {1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {1,1,1,2,2,2,2,2,3,3,3,3,3,3,3,3,3} - 5 | 2 | {1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {1,1,1,2,2,2,2,2,3,3,3,3,3,3,3,3,3} - 5 | 2 | {1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3} | {1,1,1,2,2,2,2,2,3,3,3,3,3,3,3,3,3} - 5 | 3 | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4} - 5 | 3 | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4} - 5 | 3 | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4} - 5 | 3 | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4} - 5 | 3 | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4} - 5 | 3 | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4} - 5 | 3 | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4} - 5 | 3 | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4} - 5 | 3 | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,4,4,4} | {2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4} - 5 | 4 
| {3,3,3,3,3,3,3,3,3,4,4,4,5,5,5} | {3,3,3,3,3,3,3,3,3,4,4,5,5,5} - 5 | 4 | {3,3,3,3,3,3,3,3,3,4,4,4,5,5,5} | {3,3,3,3,3,3,3,3,3,4,4,5,5,5} - 5 | 4 | {3,3,3,3,3,3,3,3,3,4,4,4,5,5,5} | {3,3,3,3,3,3,3,3,3,4,4,5,5,5} - 5 | 5 | {4,4,4,5,5,5} | {4,4,4,5,5} - 5 | 5 | {4,4,4,5,5,5} | {4,4,4,5,5} - 5 | 5 | {4,4,4,5,5,5} | {4,4,4,5,5} -(66 rows) - --- test preceding and following on ROW window -SELECT - user_id, - value_1, - array_agg(value_1) OVER row_window, - array_agg(value_1) OVER row_window_exclude -FROM - users_table -WHERE - user_id > 2 and user_id < 6 -WINDOW - row_window as (PARTITION BY user_id ORDER BY value_1 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING), - row_window_exclude as (PARTITION BY user_id ORDER BY value_1 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) -ORDER BY - user_id, value_1, 3, 4; - user_id | value_1 | array_agg | array_agg ---------------------------------------------------------------------- - 3 | 0 | {0,1} | {1} - 3 | 1 | {0,1,1} | {0,1} - 3 | 1 | {1,1,1} | {1,1} - 3 | 1 | {1,1,1} | {1,1} - 3 | 1 | {1,1,1} | {1,1} - 3 | 1 | {1,1,1} | {1,1} - 3 | 1 | {1,1,2} | {1,2} - 3 | 2 | {1,2,2} | {1,2} - 3 | 2 | {2,2,3} | {2,3} - 3 | 3 | {2,3,3} | {2,3} - 3 | 3 | {3,3,3} | {3,3} - 3 | 3 | {3,3,4} | {3,4} - 3 | 4 | {3,4,4} | {3,4} - 3 | 4 | {4,4,4} | {4,4} - 3 | 4 | {4,4,4} | {4,4} - 3 | 4 | {4,4,5} | {4,5} - 3 | 5 | {4,5} | {4} - 4 | 0 | {0,0} | {0} - 4 | 0 | {0,0,0} | {0,0} - 4 | 0 | {0,0,0} | {0,0} - 4 | 0 | {0,0,1} | {0,1} - 4 | 1 | {0,1,2} | {0,2} - 4 | 2 | {1,2,2} | {1,2} - 4 | 2 | {2,2,2} | {2,2} - 4 | 2 | {2,2,3} | {2,3} - 4 | 3 | {2,3,3} | {2,3} - 4 | 3 | {3,3,3} | {3,3} - 4 | 3 | {3,3,3} | {3,3} - 4 | 3 | {3,3,3} | {3,3} - 4 | 3 | {3,3,3} | {3,3} - 4 | 3 | {3,3,4} | {3,4} - 4 | 4 | {3,4,4} | {3,4} - 4 | 4 | {4,4,4} | {4,4} - 4 | 4 | {4,4,4} | {4,4} - 4 | 4 | {4,4,4} | {4,4} - 4 | 4 | {4,4,4} | {4,4} - 4 | 4 | {4,4,4} | {4,4} - 4 | 4 | {4,4,5} | {4,5} - 4 | 5 | {4,5,5} | {4,5} - 4 | 5 | {5,5} | {5} - 5 | 0 | {0,0} | {0} - 5 | 0 | 
{0,0,1} | {0,1} - 5 | 1 | {0,1,1} | {0,1} - 5 | 1 | {1,1,1} | {1,1} - 5 | 1 | {1,1,2} | {1,2} - 5 | 2 | {1,2,2} | {1,2} - 5 | 2 | {2,2,2} | {2,2} - 5 | 2 | {2,2,2} | {2,2} - 5 | 2 | {2,2,2} | {2,2} - 5 | 2 | {2,2,2} | {2,2} - 5 | 2 | {2,2,3} | {2,3} - 5 | 3 | {2,3,3} | {2,3} - 5 | 3 | {3,3,3} | {3,3} - 5 | 3 | {3,3,3} | {3,3} - 5 | 3 | {3,3,3} | {3,3} - 5 | 3 | {3,3,3} | {3,3} - 5 | 3 | {3,3,3} | {3,3} - 5 | 3 | {3,3,3} | {3,3} - 5 | 3 | {3,3,3} | {3,3} - 5 | 3 | {3,3,4} | {3,4} - 5 | 4 | {3,4,4} | {3,4} - 5 | 4 | {4,4,4} | {4,4} - 5 | 4 | {4,4,5} | {4,5} - 5 | 5 | {4,5,5} | {4,5} - 5 | 5 | {5,5} | {5} - 5 | 5 | {5,5,5} | {5,5} -(66 rows) - --- repeat above 3 tests without grouping by distribution column -SELECT - value_2, - rank() OVER (PARTITION BY value_2 ROWS BETWEEN - UNBOUNDED PRECEDING AND CURRENT ROW), - dense_rank() OVER (PARTITION BY value_2 RANGE BETWEEN - UNBOUNDED PRECEDING AND CURRENT ROW), - CUME_DIST() OVER (PARTITION BY value_2 RANGE BETWEEN - UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), - PERCENT_RANK() OVER (PARTITION BY value_2 ORDER BY avg(value_1) RANGE BETWEEN - UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) -FROM - users_table -GROUP BY - 1 -ORDER BY - 4 DESC,3 DESC,2 DESC ,1 DESC; - value_2 | rank | dense_rank | cume_dist | percent_rank ---------------------------------------------------------------------- - 5 | 1 | 1 | 1 | 0 - 4 | 1 | 1 | 1 | 0 - 3 | 1 | 1 | 1 | 0 - 2 | 1 | 1 | 1 | 0 - 1 | 1 | 1 | 1 | 0 - 0 | 1 | 1 | 1 | 0 -(6 rows) - --- test exclude supported -SELECT - value_2, - value_1, - array_agg(value_1) OVER (PARTITION BY value_2 ORDER BY value_1 RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), - array_agg(value_1) OVER (PARTITION BY value_2 ORDER BY value_1 RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE CURRENT ROW) -FROM - users_table -WHERE - value_2 > 2 AND value_2 < 6 -ORDER BY - value_2, value_1, 3, 4; - value_2 | value_1 | array_agg | array_agg 
---------------------------------------------------------------------- - 3 | 0 | {0,0,0} | {0,0} - 3 | 0 | {0,0,0} | {0,0} - 3 | 0 | {0,0,0} | {0,0} - 3 | 1 | {0,0,0,1,1,1,1} | {0,0,0,1,1,1} - 3 | 1 | {0,0,0,1,1,1,1} | {0,0,0,1,1,1} - 3 | 1 | {0,0,0,1,1,1,1} | {0,0,0,1,1,1} - 3 | 1 | {0,0,0,1,1,1,1} | {0,0,0,1,1,1} - 3 | 2 | {0,0,0,1,1,1,1,2,2} | {0,0,0,1,1,1,1,2} - 3 | 2 | {0,0,0,1,1,1,1,2,2} | {0,0,0,1,1,1,1,2} - 3 | 3 | {0,0,0,1,1,1,1,2,2,3,3} | {0,0,0,1,1,1,1,2,2,3} - 3 | 3 | {0,0,0,1,1,1,1,2,2,3,3} | {0,0,0,1,1,1,1,2,2,3} - 3 | 4 | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4,4} | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4} - 3 | 4 | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4,4} | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4} - 3 | 4 | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4,4} | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4} - 3 | 4 | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4,4} | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4} - 3 | 4 | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4,4} | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4} - 3 | 5 | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4,4,5} | {0,0,0,1,1,1,1,2,2,3,3,4,4,4,4,4} - 4 | 0 | {0,0} | {0} - 4 | 0 | {0,0} | {0} - 4 | 1 | {0,0,1,1} | {0,0,1} - 4 | 1 | {0,0,1,1} | {0,0,1} - 4 | 2 | {0,0,1,1,2,2,2} | {0,0,1,1,2,2} - 4 | 2 | {0,0,1,1,2,2,2} | {0,0,1,1,2,2} - 4 | 2 | {0,0,1,1,2,2,2} | {0,0,1,1,2,2} - 4 | 3 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3} | {0,0,1,1,2,2,2,3,3,3,3,3,3} - 4 | 3 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3} | {0,0,1,1,2,2,2,3,3,3,3,3,3} - 4 | 3 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3} | {0,0,1,1,2,2,2,3,3,3,3,3,3} - 4 | 3 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3} | {0,0,1,1,2,2,2,3,3,3,3,3,3} - 4 | 3 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3} | {0,0,1,1,2,2,2,3,3,3,3,3,3} - 4 | 3 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3} | {0,0,1,1,2,2,2,3,3,3,3,3,3} - 4 | 3 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3} | {0,0,1,1,2,2,2,3,3,3,3,3,3} - 4 | 4 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4} - 4 | 4 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4} - 4 | 4 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4} - 4 | 4 | 
{0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4} - 4 | 5 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4,4,5,5} | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4,4,5} - 4 | 5 | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4,4,5,5} | {0,0,1,1,2,2,2,3,3,3,3,3,3,3,4,4,4,4,5} - 5 | 0 | {0,0} | {0} - 5 | 0 | {0,0} | {0} - 5 | 1 | {0,0,1} | {0,0} - 5 | 2 | {0,0,1,2,2} | {0,0,1,2} - 5 | 2 | {0,0,1,2,2} | {0,0,1,2} - 5 | 3 | {0,0,1,2,2,3,3,3,3} | {0,0,1,2,2,3,3,3} - 5 | 3 | {0,0,1,2,2,3,3,3,3} | {0,0,1,2,2,3,3,3} - 5 | 3 | {0,0,1,2,2,3,3,3,3} | {0,0,1,2,2,3,3,3} - 5 | 3 | {0,0,1,2,2,3,3,3,3} | {0,0,1,2,2,3,3,3} - 5 | 4 | {0,0,1,2,2,3,3,3,3,4,4} | {0,0,1,2,2,3,3,3,3,4} - 5 | 4 | {0,0,1,2,2,3,3,3,3,4,4} | {0,0,1,2,2,3,3,3,3,4} - 5 | 5 | {0,0,1,2,2,3,3,3,3,4,4,5,5} | {0,0,1,2,2,3,3,3,3,4,4,5} - 5 | 5 | {0,0,1,2,2,3,3,3,3,4,4,5,5} | {0,0,1,2,2,3,3,3,3,4,4,5} -(50 rows) - --- test preceding and following on RANGE window -SELECT - value_2, - value_1, - array_agg(value_1) OVER range_window, - array_agg(value_1) OVER range_window_exclude -FROM - users_table -WHERE - value_2 > 2 AND value_2 < 6 -WINDOW - range_window as (PARTITION BY value_2 ORDER BY value_1 RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING), - range_window_exclude as (PARTITION BY value_2 ORDER BY value_1 RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) -ORDER BY - value_2, value_1, 3, 4; - value_2 | value_1 | array_agg | array_agg ---------------------------------------------------------------------- - 3 | 0 | {0,0,0,1,1,1,1} | {0,0,1,1,1,1} - 3 | 0 | {0,0,0,1,1,1,1} | {0,0,1,1,1,1} - 3 | 0 | {0,0,0,1,1,1,1} | {0,0,1,1,1,1} - 3 | 1 | {0,0,0,1,1,1,1,2,2} | {0,0,0,1,1,1,2,2} - 3 | 1 | {0,0,0,1,1,1,1,2,2} | {0,0,0,1,1,1,2,2} - 3 | 1 | {0,0,0,1,1,1,1,2,2} | {0,0,0,1,1,1,2,2} - 3 | 1 | {0,0,0,1,1,1,1,2,2} | {0,0,0,1,1,1,2,2} - 3 | 2 | {1,1,1,1,2,2,3,3} | {1,1,1,1,2,3,3} - 3 | 2 | {1,1,1,1,2,2,3,3} | {1,1,1,1,2,3,3} - 3 | 3 | {2,2,3,3,4,4,4,4,4} | {2,2,3,4,4,4,4,4} - 3 | 3 | {2,2,3,3,4,4,4,4,4} | {2,2,3,4,4,4,4,4} - 3 | 4 
| {3,3,4,4,4,4,4,5} | {3,3,4,4,4,4,5} - 3 | 4 | {3,3,4,4,4,4,4,5} | {3,3,4,4,4,4,5} - 3 | 4 | {3,3,4,4,4,4,4,5} | {3,3,4,4,4,4,5} - 3 | 4 | {3,3,4,4,4,4,4,5} | {3,3,4,4,4,4,5} - 3 | 4 | {3,3,4,4,4,4,4,5} | {3,3,4,4,4,4,5} - 3 | 5 | {4,4,4,4,4,5} | {4,4,4,4,4} - 4 | 0 | {0,0,1,1} | {0,1,1} - 4 | 0 | {0,0,1,1} | {0,1,1} - 4 | 1 | {0,0,1,1,2,2,2} | {0,0,1,2,2,2} - 4 | 1 | {0,0,1,1,2,2,2} | {0,0,1,2,2,2} - 4 | 2 | {1,1,2,2,2,3,3,3,3,3,3,3} | {1,1,2,2,3,3,3,3,3,3,3} - 4 | 2 | {1,1,2,2,2,3,3,3,3,3,3,3} | {1,1,2,2,3,3,3,3,3,3,3} - 4 | 2 | {1,1,2,2,2,3,3,3,3,3,3,3} | {1,1,2,2,3,3,3,3,3,3,3} - 4 | 3 | {2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {2,2,2,3,3,3,3,3,3,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {2,2,2,3,3,3,3,3,3,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {2,2,2,3,3,3,3,3,3,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {2,2,2,3,3,3,3,3,3,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {2,2,2,3,3,3,3,3,3,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {2,2,2,3,3,3,3,3,3,4,4,4,4} - 4 | 3 | {2,2,2,3,3,3,3,3,3,3,4,4,4,4} | {2,2,2,3,3,3,3,3,3,4,4,4,4} - 4 | 4 | {3,3,3,3,3,3,3,4,4,4,4,5,5} | {3,3,3,3,3,3,3,4,4,4,5,5} - 4 | 4 | {3,3,3,3,3,3,3,4,4,4,4,5,5} | {3,3,3,3,3,3,3,4,4,4,5,5} - 4 | 4 | {3,3,3,3,3,3,3,4,4,4,4,5,5} | {3,3,3,3,3,3,3,4,4,4,5,5} - 4 | 4 | {3,3,3,3,3,3,3,4,4,4,4,5,5} | {3,3,3,3,3,3,3,4,4,4,5,5} - 4 | 5 | {4,4,4,4,5,5} | {4,4,4,4,5} - 4 | 5 | {4,4,4,4,5,5} | {4,4,4,4,5} - 5 | 0 | {0,0,1} | {0,1} - 5 | 0 | {0,0,1} | {0,1} - 5 | 1 | {0,0,1,2,2} | {0,0,2,2} - 5 | 2 | {1,2,2,3,3,3,3} | {1,2,3,3,3,3} - 5 | 2 | {1,2,2,3,3,3,3} | {1,2,3,3,3,3} - 5 | 3 | {2,2,3,3,3,3,4,4} | {2,2,3,3,3,4,4} - 5 | 3 | {2,2,3,3,3,3,4,4} | {2,2,3,3,3,4,4} - 5 | 3 | {2,2,3,3,3,3,4,4} | {2,2,3,3,3,4,4} - 5 | 3 | {2,2,3,3,3,3,4,4} | {2,2,3,3,3,4,4} - 5 | 4 | {3,3,3,3,4,4,5,5} | {3,3,3,3,4,5,5} - 5 | 4 | {3,3,3,3,4,4,5,5} | {3,3,3,3,4,5,5} - 5 | 5 | {4,4,5,5} | {4,4,5} - 5 | 5 | {4,4,5,5} | {4,4,5} -(50 rows) - --- test preceding and following on ROW window -SELECT - 
value_2, - value_1, - array_agg(value_1) OVER row_window, - array_agg(value_1) OVER row_window_exclude -FROM - users_table -WHERE - value_2 > 2 and value_2 < 6 -WINDOW - row_window as (PARTITION BY value_2 ORDER BY value_1 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING), - row_window_exclude as (PARTITION BY value_2 ORDER BY value_1 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) -ORDER BY - value_2, value_1, 3, 4; - value_2 | value_1 | array_agg | array_agg ---------------------------------------------------------------------- - 3 | 0 | {0,0} | {0} - 3 | 0 | {0,0,0} | {0,0} - 3 | 0 | {0,0,1} | {0,1} - 3 | 1 | {0,1,1} | {0,1} - 3 | 1 | {1,1,1} | {1,1} - 3 | 1 | {1,1,1} | {1,1} - 3 | 1 | {1,1,2} | {1,2} - 3 | 2 | {1,2,2} | {1,2} - 3 | 2 | {2,2,3} | {2,3} - 3 | 3 | {2,3,3} | {2,3} - 3 | 3 | {3,3,4} | {3,4} - 3 | 4 | {3,4,4} | {3,4} - 3 | 4 | {4,4,4} | {4,4} - 3 | 4 | {4,4,4} | {4,4} - 3 | 4 | {4,4,4} | {4,4} - 3 | 4 | {4,4,5} | {4,5} - 3 | 5 | {4,5} | {4} - 4 | 0 | {0,0} | {0} - 4 | 0 | {0,0,1} | {0,1} - 4 | 1 | {0,1,1} | {0,1} - 4 | 1 | {1,1,2} | {1,2} - 4 | 2 | {1,2,2} | {1,2} - 4 | 2 | {2,2,2} | {2,2} - 4 | 2 | {2,2,3} | {2,3} - 4 | 3 | {2,3,3} | {2,3} - 4 | 3 | {3,3,3} | {3,3} - 4 | 3 | {3,3,3} | {3,3} - 4 | 3 | {3,3,3} | {3,3} - 4 | 3 | {3,3,3} | {3,3} - 4 | 3 | {3,3,3} | {3,3} - 4 | 3 | {3,3,4} | {3,4} - 4 | 4 | {3,4,4} | {3,4} - 4 | 4 | {4,4,4} | {4,4} - 4 | 4 | {4,4,4} | {4,4} - 4 | 4 | {4,4,5} | {4,5} - 4 | 5 | {4,5,5} | {4,5} - 4 | 5 | {5,5} | {5} - 5 | 0 | {0,0} | {0} - 5 | 0 | {0,0,1} | {0,1} - 5 | 1 | {0,1,2} | {0,2} - 5 | 2 | {1,2,2} | {1,2} - 5 | 2 | {2,2,3} | {2,3} - 5 | 3 | {2,3,3} | {2,3} - 5 | 3 | {3,3,3} | {3,3} - 5 | 3 | {3,3,3} | {3,3} - 5 | 3 | {3,3,4} | {3,4} - 5 | 4 | {3,4,4} | {3,4} - 5 | 4 | {4,4,5} | {4,5} - 5 | 5 | {4,5,5} | {4,5} - 5 | 5 | {5,5} | {5} -(50 rows) - --- some tests with GROUP BY, HAVING and LIMIT -SELECT - user_id, sum(event_type) OVER my_win , event_type -FROM - events_table -GROUP BY - user_id, event_type -HAVING 
count(*) > 2 - WINDOW my_win AS (PARTITION BY user_id, max(event_type) ORDER BY count(*) DESC) -ORDER BY - 2 DESC, 3 DESC, 1 DESC -LIMIT - 5; - user_id | sum | event_type ---------------------------------------------------------------------- - 4 | 4 | 4 - 3 | 4 | 4 - 2 | 4 | 4 - 1 | 4 | 4 - 5 | 3 | 3 -(5 rows) - --- test PARTITION BY avg(...) ORDER BY avg(...) -SELECT - value_1, - avg(value_3), - dense_rank() OVER (PARTITION BY avg(value_3) ORDER BY avg(value_2)) -FROM - users_table -GROUP BY - 1 -ORDER BY - 1; - value_1 | avg | dense_rank ---------------------------------------------------------------------- - 0 | 3.08333333333333 | 1 - 1 | 2.93333333333333 | 1 - 2 | 2.22222222222222 | 1 - 3 | 2.73076923076923 | 1 - 4 | 2.9047619047619 | 1 - 5 | 2.22222222222222 | 2 -(6 rows) - --- Group by has more columns than partition by -SELECT - DISTINCT user_id, SUM(value_2) OVER (PARTITION BY user_id) -FROM - users_table -GROUP BY - user_id, value_1, value_2 -HAVING count(*) > 2 -ORDER BY - 2 DESC, 1 -LIMIT - 10; - user_id | sum ---------------------------------------------------------------------- - 5 | 3 - 4 | 2 -(2 rows) - -SELECT - DISTINCT ON (user_id) user_id, SUM(value_2) OVER (PARTITION BY user_id) -FROM - users_table -GROUP BY - user_id, value_1, value_2 -HAVING count(*) > 2 -ORDER BY - 1, 2 DESC -LIMIT - 10; - user_id | sum ---------------------------------------------------------------------- - 4 | 2 - 5 | 3 -(2 rows) - -SELECT - DISTINCT ON (SUM(value_1) OVER (PARTITION BY user_id)) user_id, SUM(value_2) OVER (PARTITION BY user_id) -FROM - users_table -GROUP BY - user_id, value_1, value_2 -HAVING count(*) > 2 -ORDER BY - (SUM(value_1) OVER (PARTITION BY user_id)) , 2 DESC, 1 -LIMIT - 10; - user_id | sum ---------------------------------------------------------------------- - 5 | 3 - 4 | 2 -(2 rows) - --- not a meaningful query, with interesting syntax -SELECT - user_id, - AVG(avg(value_1)) OVER (PARTITION BY user_id, max(user_id), MIN(value_2)), - 
AVG(avg(user_id)) OVER (PARTITION BY user_id, min(user_id), AVG(value_1)) -FROM - users_table -GROUP BY - 1 -ORDER BY - 3 DESC, 2 DESC, 1 DESC; - user_id | avg | avg ---------------------------------------------------------------------- - 6 | 2.1000000000000000 | 6.0000000000000000 - 5 | 2.6538461538461538 | 5.0000000000000000 - 4 | 2.7391304347826087 | 4.0000000000000000 - 3 | 2.3529411764705882 | 3.0000000000000000 - 2 | 2.3333333333333333 | 2.0000000000000000 - 1 | 3.2857142857142857 | 1.00000000000000000000 -(6 rows) - -SELECT coordinator_plan($Q$ -EXPLAIN (COSTS FALSE) -SELECT - user_id, - AVG(avg(value_1)) OVER (PARTITION BY user_id, max(user_id), MIN(value_2)), - AVG(avg(user_id)) OVER (PARTITION BY user_id, min(user_id), AVG(value_1)) -FROM - users_table -GROUP BY - 1 -ORDER BY - 3 DESC, 2 DESC, 1 DESC; -$Q$); - coordinator_plan ---------------------------------------------------------------------- - Sort - Sort Key: remote_scan.avg_1 DESC, remote_scan.avg DESC, remote_scan.user_id DESC - -> Custom Scan (Citus Adaptive) - Task Count: 4 -(4 rows) - -SELECT - value_2, - AVG(avg(value_1)) OVER (PARTITION BY value_2, max(value_2), MIN(value_2)), - AVG(avg(value_2)) OVER (PARTITION BY value_2, min(value_2), AVG(value_1)) -FROM - users_table -GROUP BY - 1 -ORDER BY - 3 DESC, 2 DESC, 1 DESC; - value_2 | avg | avg ---------------------------------------------------------------------- - 5 | 2.6923076923076923 | 5.0000000000000000 - 4 | 2.7500000000000000 | 4.0000000000000000 - 3 | 2.2941176470588235 | 3.0000000000000000 - 2 | 2.7619047619047619 | 2.0000000000000000 - 1 | 2.4285714285714286 | 1.00000000000000000000 - 0 | 2.2222222222222222 | 0.00000000000000000000 -(6 rows) - -SELECT - value_2, user_id, - AVG(avg(value_1)) OVER (PARTITION BY value_2, max(value_2), MIN(value_2)), - AVG(avg(value_2)) OVER (PARTITION BY user_id, min(value_2), AVG(value_1)) -FROM - users_table -GROUP BY - 1, 2 -ORDER BY - 3 DESC, 2 DESC, 1 DESC; - value_2 | user_id | avg | avg 
---------------------------------------------------------------------- - 5 | 5 | 2.6666666666666667 | 5.0000000000000000 - 5 | 4 | 2.6666666666666667 | 5.0000000000000000 - 5 | 3 | 2.6666666666666667 | 5.0000000000000000 - 5 | 2 | 2.6666666666666667 | 5.0000000000000000 - 2 | 6 | 2.54583333333333333333 | 2.0000000000000000 - 2 | 5 | 2.54583333333333333333 | 2.0000000000000000 - 2 | 4 | 2.54583333333333333333 | 2.0000000000000000 - 2 | 3 | 2.54583333333333333333 | 2.0000000000000000 - 2 | 2 | 2.54583333333333333333 | 2.0000000000000000 - 2 | 1 | 2.54583333333333333333 | 2.0000000000000000 - 0 | 6 | 2.50000000000000000000 | 0.00000000000000000000 - 0 | 5 | 2.50000000000000000000 | 0.00000000000000000000 - 0 | 4 | 2.50000000000000000000 | 0.00000000000000000000 - 0 | 2 | 2.50000000000000000000 | 0.00000000000000000000 - 0 | 1 | 2.50000000000000000000 | 0.00000000000000000000 - 4 | 6 | 2.45555555555555555000 | 4.0000000000000000 - 4 | 5 | 2.45555555555555555000 | 4.0000000000000000 - 4 | 4 | 2.45555555555555555000 | 4.0000000000000000 - 4 | 3 | 2.45555555555555555000 | 4.0000000000000000 - 4 | 2 | 2.45555555555555555000 | 4.0000000000000000 - 4 | 1 | 2.45555555555555555000 | 4.0000000000000000 - 3 | 6 | 2.3500000000000000 | 3.0000000000000000 - 3 | 5 | 2.3500000000000000 | 3.0000000000000000 - 3 | 4 | 2.3500000000000000 | 3.0000000000000000 - 3 | 3 | 2.3500000000000000 | 3.0000000000000000 - 3 | 2 | 2.3500000000000000 | 3.0000000000000000 - 3 | 1 | 2.3500000000000000 | 3.0000000000000000 - 1 | 6 | 1.90666666666666666000 | 1.00000000000000000000 - 1 | 5 | 1.90666666666666666000 | 1.00000000000000000000 - 1 | 4 | 1.90666666666666666000 | 1.00000000000000000000 - 1 | 3 | 1.90666666666666666000 | 1.00000000000000000000 - 1 | 2 | 1.90666666666666666000 | 1.00000000000000000000 -(32 rows) - -SELECT user_id, sum(avg(user_id)) OVER () -FROM users_table -GROUP BY user_id -ORDER BY 1 -LIMIT 10; - user_id | sum 
---------------------------------------------------------------------- - 1 | 21.00000000000000000000 - 2 | 21.00000000000000000000 - 3 | 21.00000000000000000000 - 4 | 21.00000000000000000000 - 5 | 21.00000000000000000000 - 6 | 21.00000000000000000000 -(6 rows) - -SELECT - user_id, - 1 + sum(value_1), - 1 + AVG(value_2) OVER (partition by user_id) -FROM - users_table -GROUP BY - user_id, value_2 -ORDER BY - user_id, value_2; - user_id | ?column? | ?column? ---------------------------------------------------------------------- - 1 | 5 | 3.2500000000000000 - 1 | 4 | 3.2500000000000000 - 1 | 6 | 3.2500000000000000 - 1 | 12 | 3.2500000000000000 - 2 | 3 | 3.5000000000000000 - 2 | 5 | 3.5000000000000000 - 2 | 13 | 3.5000000000000000 - 2 | 6 | 3.5000000000000000 - 2 | 17 | 3.5000000000000000 - 2 | 4 | 3.5000000000000000 - 3 | 3 | 4.0000000000000000 - 3 | 13 | 4.0000000000000000 - 3 | 10 | 4.0000000000000000 - 3 | 2 | 4.0000000000000000 - 3 | 17 | 4.0000000000000000 - 4 | 4 | 3.5000000000000000 - 4 | 28 | 3.5000000000000000 - 4 | 1 | 3.5000000000000000 - 4 | 11 | 3.5000000000000000 - 4 | 17 | 3.5000000000000000 - 4 | 8 | 3.5000000000000000 - 5 | 7 | 3.5000000000000000 - 5 | 17 | 3.5000000000000000 - 5 | 24 | 3.5000000000000000 - 5 | 9 | 3.5000000000000000 - 5 | 8 | 3.5000000000000000 - 5 | 10 | 3.5000000000000000 - 6 | 6 | 3.0000000000000000 - 6 | 3 | 3.0000000000000000 - 6 | 9 | 3.0000000000000000 - 6 | 3 | 3.0000000000000000 - 6 | 5 | 3.0000000000000000 -(32 rows) - -SELECT - user_id, - 1 + sum(value_1), - 1 + AVG(value_2) OVER (partition by user_id) -FROM - users_table -GROUP BY - user_id, value_2 -ORDER BY - 2 DESC, 1 -LIMIT 5; - user_id | ?column? | ?column? 
---------------------------------------------------------------------- - 4 | 28 | 3.5000000000000000 - 5 | 24 | 3.5000000000000000 - 2 | 17 | 3.5000000000000000 - 3 | 17 | 4.0000000000000000 - 4 | 17 | 3.5000000000000000 -(5 rows) - --- rank and ordering in the reverse order -SELECT - user_id, - avg(value_1), - RANK() OVER (partition by user_id order by value_2) -FROM - users_table -GROUP BY user_id, value_2 -ORDER BY user_id, value_2 DESC; - user_id | avg | rank ---------------------------------------------------------------------- - 1 | 3.6666666666666667 | 4 - 1 | 2.5000000000000000 | 3 - 1 | 3.0000000000000000 | 2 - 1 | 4.0000000000000000 | 1 - 2 | 1.5000000000000000 | 6 - 2 | 3.2000000000000000 | 5 - 2 | 1.6666666666666667 | 4 - 2 | 3.0000000000000000 | 3 - 2 | 1.3333333333333333 | 2 - 2 | 2.0000000000000000 | 1 - 3 | 2.6666666666666667 | 5 - 3 | 1.00000000000000000000 | 4 - 3 | 3.0000000000000000 | 3 - 3 | 2.4000000000000000 | 2 - 3 | 1.00000000000000000000 | 1 - 4 | 3.5000000000000000 | 6 - 4 | 3.2000000000000000 | 5 - 4 | 3.3333333333333333 | 4 - 4 | 0.00000000000000000000 | 3 - 4 | 3.0000000000000000 | 2 - 4 | 1.00000000000000000000 | 1 - 5 | 3.0000000000000000 | 6 - 5 | 2.3333333333333333 | 5 - 5 | 1.6000000000000000 | 4 - 5 | 2.8750000000000000 | 3 - 5 | 3.2000000000000000 | 2 - 5 | 3.0000000000000000 | 1 - 6 | 1.3333333333333333 | 5 - 6 | 2.0000000000000000 | 4 - 6 | 4.0000000000000000 | 3 - 6 | 1.00000000000000000000 | 2 - 6 | 2.5000000000000000 | 1 -(32 rows) - --- order by in the window function is same as avg(value_1) DESC -SELECT - user_id, - avg(value_1), - RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1))) -FROM - users_table -GROUP BY user_id, value_2 -ORDER BY user_id, avg(value_1) DESC; - user_id | avg | rank ---------------------------------------------------------------------- - 1 | 4.0000000000000000 | 1 - 1 | 3.6666666666666667 | 2 - 1 | 3.0000000000000000 | 3 - 1 | 2.5000000000000000 | 4 - 2 | 3.2000000000000000 | 1 - 2 | 
3.0000000000000000 | 2 - 2 | 2.0000000000000000 | 3 - 2 | 1.6666666666666667 | 4 - 2 | 1.5000000000000000 | 5 - 2 | 1.3333333333333333 | 6 - 3 | 3.0000000000000000 | 1 - 3 | 2.6666666666666667 | 2 - 3 | 2.4000000000000000 | 3 - 3 | 1.00000000000000000000 | 4 - 3 | 1.00000000000000000000 | 4 - 4 | 3.5000000000000000 | 1 - 4 | 3.3333333333333333 | 2 - 4 | 3.2000000000000000 | 3 - 4 | 3.0000000000000000 | 4 - 4 | 1.00000000000000000000 | 5 - 4 | 0.00000000000000000000 | 6 - 5 | 3.2000000000000000 | 1 - 5 | 3.0000000000000000 | 2 - 5 | 3.0000000000000000 | 2 - 5 | 2.8750000000000000 | 4 - 5 | 2.3333333333333333 | 5 - 5 | 1.6000000000000000 | 6 - 6 | 4.0000000000000000 | 1 - 6 | 2.5000000000000000 | 2 - 6 | 2.0000000000000000 | 3 - 6 | 1.3333333333333333 | 4 - 6 | 1.00000000000000000000 | 5 -(32 rows) - -EXPLAIN (COSTS FALSE) -SELECT - user_id, - avg(value_1), - RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1))) -FROM - users_table -GROUP BY user_id, value_2 -ORDER BY user_id, avg(value_1) DESC; - QUERY PLAN ---------------------------------------------------------------------- - Sort - Sort Key: remote_scan.user_id, remote_scan.avg DESC - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> WindowAgg - -> Sort - Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1)))) - -> HashAggregate - Group Key: users_table.user_id, users_table.value_2 - -> Seq Scan on users_table_1400256 users_table -(13 rows) - --- order by in the window function is same as avg(value_1) DESC -SELECT - user_id, - avg(value_1), - RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1))) -FROM - users_table -GROUP BY user_id, value_2 -ORDER BY user_id, avg(value_1) DESC; - user_id | avg | rank ---------------------------------------------------------------------- - 1 | 4.0000000000000000 | 1 - 1 | 3.6666666666666667 | 2 - 1 | 3.0000000000000000 | 3 - 1 | 
2.5000000000000000 | 4 - 2 | 3.2000000000000000 | 1 - 2 | 3.0000000000000000 | 2 - 2 | 2.0000000000000000 | 3 - 2 | 1.6666666666666667 | 4 - 2 | 1.5000000000000000 | 5 - 2 | 1.3333333333333333 | 6 - 3 | 3.0000000000000000 | 1 - 3 | 2.6666666666666667 | 2 - 3 | 2.4000000000000000 | 3 - 3 | 1.00000000000000000000 | 4 - 3 | 1.00000000000000000000 | 4 - 4 | 3.5000000000000000 | 1 - 4 | 3.3333333333333333 | 2 - 4 | 3.2000000000000000 | 3 - 4 | 3.0000000000000000 | 4 - 4 | 1.00000000000000000000 | 5 - 4 | 0.00000000000000000000 | 6 - 5 | 3.2000000000000000 | 1 - 5 | 3.0000000000000000 | 2 - 5 | 3.0000000000000000 | 2 - 5 | 2.8750000000000000 | 4 - 5 | 2.3333333333333333 | 5 - 5 | 1.6000000000000000 | 6 - 6 | 4.0000000000000000 | 1 - 6 | 2.5000000000000000 | 2 - 6 | 2.0000000000000000 | 3 - 6 | 1.3333333333333333 | 4 - 6 | 1.00000000000000000000 | 5 -(32 rows) - --- limit is not pushed down to worker !! -EXPLAIN (COSTS FALSE) -SELECT - user_id, - avg(value_1), - RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1))) -FROM - users_table -GROUP BY user_id, value_2 -ORDER BY user_id, avg(value_1) DESC -LIMIT 5; - QUERY PLAN ---------------------------------------------------------------------- - Limit - -> Sort - Sort Key: remote_scan.user_id, remote_scan.avg DESC - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Limit - -> Incremental Sort - Sort Key: users_table.user_id, (avg(users_table.value_1)) DESC - Presorted Key: users_table.user_id - -> WindowAgg - -> Sort - Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1)))) - -> HashAggregate - Group Key: users_table.user_id, users_table.value_2 - -> Seq Scan on users_table_1400256 users_table -(18 rows) - -EXPLAIN (COSTS FALSE) -SELECT - user_id, - avg(value_1), - RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1))) -FROM - users_table -GROUP BY user_id, value_2 -ORDER BY 
user_id, avg(value_1) DESC -LIMIT 5; - QUERY PLAN ---------------------------------------------------------------------- - Limit - -> Sort - Sort Key: remote_scan.user_id, remote_scan.avg DESC - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Limit - -> Incremental Sort - Sort Key: users_table.user_id, (avg(users_table.value_1)) DESC - Presorted Key: users_table.user_id - -> WindowAgg - -> Sort - Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1)))) - -> HashAggregate - Group Key: users_table.user_id, users_table.value_2 - -> Seq Scan on users_table_1400256 users_table -(18 rows) - -EXPLAIN (COSTS FALSE) -SELECT - user_id, - avg(value_1), - RANK() OVER (partition by user_id order by 1 / (1 + sum(value_2))) -FROM - users_table -GROUP BY user_id, value_2 -ORDER BY user_id, avg(value_1) DESC -LIMIT 5; - QUERY PLAN ---------------------------------------------------------------------- - Limit - -> Sort - Sort Key: remote_scan.user_id, remote_scan.avg DESC - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Limit - -> Incremental Sort - Sort Key: users_table.user_id, (avg(users_table.value_1)) DESC - Presorted Key: users_table.user_id - -> WindowAgg - -> Sort - Sort Key: users_table.user_id, ((1 / (1 + sum(users_table.value_2)))) - -> HashAggregate - Group Key: users_table.user_id, users_table.value_2 - -> Seq Scan on users_table_1400256 users_table -(18 rows) - -EXPLAIN (COSTS FALSE) -SELECT - user_id, - avg(value_1), - RANK() OVER (partition by user_id order by sum(value_2)) -FROM - users_table -GROUP BY user_id, value_2 -ORDER BY user_id, avg(value_1) DESC -LIMIT 5; - QUERY PLAN ---------------------------------------------------------------------- - Limit - -> Sort - Sort Key: remote_scan.user_id, remote_scan.avg DESC - -> Custom Scan (Citus Adaptive) 
- Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Limit - -> Incremental Sort - Sort Key: users_table.user_id, (avg(users_table.value_1)) DESC - Presorted Key: users_table.user_id - -> WindowAgg - -> Sort - Sort Key: users_table.user_id, (sum(users_table.value_2)) - -> HashAggregate - Group Key: users_table.user_id, users_table.value_2 - -> Seq Scan on users_table_1400256 users_table -(18 rows) - --- Grouping can be pushed down with aggregates even when window function can't -EXPLAIN (COSTS FALSE) -SELECT user_id, count(value_1), stddev(value_1), count(user_id) OVER (PARTITION BY random()) -FROM users_table GROUP BY user_id HAVING avg(value_1) > 2 LIMIT 1; - QUERY PLAN ---------------------------------------------------------------------- - Limit - -> WindowAgg - -> Sort - Sort Key: remote_scan.worker_column_5 - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: user_id - Filter: (avg(value_1) > '2'::numeric) - -> Seq Scan on users_table_1400256 users_table -(13 rows) - --- Window function with inlined CTE -WITH cte as ( - SELECT uref.id user_id, events_table.value_2, count(*) c - FROM events_table - JOIN users_ref_test_table uref ON uref.id = events_table.user_id - GROUP BY 1, 2 -) -SELECT DISTINCT cte.value_2, cte.c, sum(cte.value_2) OVER (PARTITION BY cte.c) -FROM cte JOIN events_table et ON et.value_2 = cte.value_2 and et.value_2 = cte.c -ORDER BY 1; - value_2 | c | sum ---------------------------------------------------------------------- - 3 | 3 | 108 - 4 | 4 | 56 -(2 rows) - --- There was a strange bug where this wouldn't have window functions being pushed down --- Bug dependent on column ordering -CREATE TABLE daily_uniques (value_2 float, user_id bigint); -SELECT create_distributed_table('daily_uniques', 'user_id'); - create_distributed_table 
---------------------------------------------------------------------- - -(1 row) - -EXPLAIN (COSTS FALSE) SELECT - user_id, - sum(value_2) AS commits, - RANK () OVER ( - PARTITION BY user_id - ORDER BY - sum(value_2) DESC - ) -FROM daily_uniques -GROUP BY user_id -HAVING - sum(value_2) > 0 -ORDER BY commits DESC -LIMIT 10; - QUERY PLAN ---------------------------------------------------------------------- - Limit - -> Sort - Sort Key: remote_scan.commits DESC - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Limit - -> Sort - Sort Key: (sum(daily_uniques.value_2)) DESC - -> WindowAgg - -> Sort - Sort Key: daily_uniques.user_id, (sum(daily_uniques.value_2)) DESC - -> HashAggregate - Group Key: daily_uniques.user_id - Filter: (sum(daily_uniques.value_2) > '0'::double precision) - -> Seq Scan on daily_uniques_xxxxxxx daily_uniques -(18 rows) - -DROP TABLE daily_uniques; --- Partition by reference table column joined to distribution column -SELECT DISTINCT value_2, array_agg(rnk ORDER BY rnk) FROM ( -SELECT events_table.value_2, sum(uref.k_no) OVER (PARTITION BY uref.id) AS rnk -FROM events_table -JOIN users_ref_test_table uref ON uref.id = events_table.user_id) sq -GROUP BY 1 ORDER BY 1; - value_2 | array_agg ---------------------------------------------------------------------- - 0 | {686,686,816,816,987,987,1104} - 1 | {500,500,675,675,675,686,686,816,816,816,987,987,987,987,987,1104,1104,1104,1104,1104,1104,1104} - 2 | {500,500,500,500,675,675,675,675,675,686,686,686,686,816,816,816,816,816,987,987,987,987,987,987,987,1104,1104,1104,1104,1104,1104} - 3 | {500,500,500,500,675,686,686,686,816,816,987,987,987,1104,1104,1104,1104,1104} - 4 | {675,675,675,675,686,686,686,816,816,816,987,987,1104,1104} - 5 | {675,675,816,816,987,987,1104,1104,1104} -(6 rows) - --- https://github.com/citusdata/citus/issues/3754 -select null = sum(null::int2) over () -from public.users_table as ut 
limit 1; - ?column? ---------------------------------------------------------------------- - -(1 row) - --- verify that this doesn't crash with DEBUG4 -SET log_min_messages TO DEBUG4; -SELECT - user_id, max(value_1) OVER (PARTITION BY user_id, MIN(value_2)) -FROM ( - SELECT - DISTINCT us.user_id, us.value_2, value_1, random() as r1 - FROM - users_table as us, events_table - WHERE - us.user_id = events_table.user_id AND event_type IN (1,2) - ORDER BY - user_id, value_2 - ) s -GROUP BY - 1, value_1 -ORDER BY - 2 DESC, 1; - user_id | max ---------------------------------------------------------------------- - 1 | 5 - 3 | 5 - 3 | 5 - 4 | 5 - 5 | 5 - 5 | 5 - 6 | 5 - 6 | 5 - 1 | 4 - 2 | 4 - 3 | 4 - 3 | 4 - 3 | 4 - 4 | 4 - 4 | 4 - 5 | 4 - 5 | 4 - 1 | 3 - 2 | 3 - 2 | 3 - 2 | 3 - 6 | 3 - 2 | 2 - 4 | 2 - 4 | 2 - 4 | 2 - 6 | 2 - 1 | 1 - 3 | 1 - 5 | 1 - 6 | 1 - 5 | 0 -(32 rows) - diff --git a/src/test/regress/spec/isolation_master_update_node.spec b/src/test/regress/spec/isolation_master_update_node.spec index 5c5a1bb48..3715b6afd 100644 --- a/src/test/regress/spec/isolation_master_update_node.spec +++ b/src/test/regress/spec/isolation_master_update_node.spec @@ -1,7 +1,6 @@ -// Three alternative test outputs: +// Two alternative test outputs: // isolation_master_update_node.out for PG15 // isolation_master_update_node_0.out for PG14 -// isolation_master_update_node_1.out for PG13 setup { diff --git a/src/test/regress/sql/cpu_priority.sql b/src/test/regress/sql/cpu_priority.sql index beb156fa8..ec2206f6f 100644 --- a/src/test/regress/sql/cpu_priority.sql +++ b/src/test/regress/sql/cpu_priority.sql @@ -63,9 +63,6 @@ SET search_path TO cpu_priority; -- in their CREATE SUBSCRIPTION commands. SET citus.log_remote_commands TO ON; SET citus.grep_remote_commands = '%CREATE SUBSCRIPTION%'; --- We disable binary protocol, so we have consistent output between PG13 and --- PG14, beacuse PG13 doesn't support binary logical replication. 
-SET citus.enable_binary_protocol = false; SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); SET citus.cpu_priority_for_logical_replication_senders = 15; SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); diff --git a/src/test/regress/sql/generated_identity.sql b/src/test/regress/sql/generated_identity.sql index 40021f8d3..df967ddd0 100644 --- a/src/test/regress/sql/generated_identity.sql +++ b/src/test/regress/sql/generated_identity.sql @@ -1,7 +1,3 @@ --- This test file has an alternative output because of error messages vary for PG13 -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13; - CREATE SCHEMA generated_identities; SET search_path TO generated_identities; SET client_min_messages to ERROR; diff --git a/src/test/regress/sql/grant_on_schema_propagation.sql b/src/test/regress/sql/grant_on_schema_propagation.sql index 1cb601ad6..f0bd233a2 100644 --- a/src/test/regress/sql/grant_on_schema_propagation.sql +++ b/src/test/regress/sql/grant_on_schema_propagation.sql @@ -1,7 +1,7 @@ -- -- GRANT_ON_SCHEMA_PROPAGATION -- --- this test has different output for PG13/14 compared to PG15 +-- this test has different output for PG14 compared to PG15 -- In PG15, public schema is owned by pg_database_owner role -- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 SHOW server_version \gset @@ -189,6 +189,9 @@ DROP SCHEMA dist_schema CASCADE; SET citus.shard_replication_factor TO 1; SELECT master_remove_node('localhost', :worker_2_port); +-- to avoid different output in PG15 +GRANT CREATE ON SCHEMA public TO public; + -- distribute the public schema (it has to be distributed by now but just in case) CREATE TABLE public_schema_table (id INT); SELECT create_distributed_table('public_schema_table', 'id'); diff --git a/src/test/regress/sql/multi_metadata_sync.sql 
b/src/test/regress/sql/multi_metadata_sync.sql index 0529e1e1d..1b8043cdd 100644 --- a/src/test/regress/sql/multi_metadata_sync.sql +++ b/src/test/regress/sql/multi_metadata_sync.sql @@ -1,7 +1,7 @@ -- -- MULTI_METADATA_SYNC -- --- this test has different output for PG13/14 compared to PG15 +-- this test has different output for PG14 compared to PG15 -- In PG15, public schema is owned by pg_database_owner role -- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 SHOW server_version \gset diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql index be4d2f72d..afac00174 100644 --- a/src/test/regress/sql/pg14.sql +++ b/src/test/regress/sql/pg14.sql @@ -1,11 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14 -\gset -\if :server_version_ge_14 -\else -\q -\endif - create schema pg14; set search_path to pg14; SET citus.shard_replication_factor TO 1; diff --git a/src/test/regress/sql/stat_statements.sql b/src/test/regress/sql/stat_statements.sql index 546a5aefa..5afed9215 100644 --- a/src/test/regress/sql/stat_statements.sql +++ b/src/test/regress/sql/stat_statements.sql @@ -3,12 +3,7 @@ -- -- tests citus_stat_statements functionality -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14 -\gset -\if :server_version_ge_14 SET compute_query_id = 'on'; -\endif -- check if pg_stat_statements is available SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements'; @@ -50,11 +45,7 @@ SELECT create_distributed_table('test','a'); insert into test values(1); select query, calls from citus_stat_statements(); -\if :server_version_ge_14 SET compute_query_id = 'off'; -\else -set citus.stat_statements_track = 'none'; -\endif -- for pg >= 14, since compute_query_id is off, this insert -- shouldn't be tracked @@ -64,11 +55,7 @@ insert into test values(1); select query, calls from citus_stat_statements(); -\if 
:server_version_ge_14 SET compute_query_id = 'on'; -\else -RESET citus.stat_statements_track; -\endif SELECT citus_stat_statements_reset(); @@ -290,6 +277,4 @@ DROP TABLE stat_test_text, stat_test_bigint, stat_test_bigint_other, stat_test_r DROP FUNCTION normalize_query_string(text); -\if :server_version_ge_14 SET compute_query_id = 'off'; -\endif diff --git a/src/test/regress/sql/window_functions.sql b/src/test/regress/sql/window_functions.sql index de936c95c..2f7ea18d2 100644 --- a/src/test/regress/sql/window_functions.sql +++ b/src/test/regress/sql/window_functions.sql @@ -3,8 +3,6 @@ -- =================================================================== -- test top level window functions that are pushdownable -- =================================================================== --- This test file has an alternative output because of use of --- incremental sort in some explain outputs in PG13 -- -- a very simple window function with an aggregate and a window function