diff --git a/.gitattributes b/.gitattributes index 2ac7318c4..9a404a4c2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -26,7 +26,6 @@ configure -whitespace # except these exceptions... src/backend/distributed/utils/citus_outfuncs.c -citus-style -src/backend/distributed/utils/ruleutils_96.c -citus-style src/backend/distributed/utils/ruleutils_10.c -citus-style src/backend/distributed/utils/ruleutils_11.c -citus-style src/include/distributed/citus_nodes.h -citus-style diff --git a/configure b/configure index 2b50d9fad..fa5a70cac 100755 --- a/configure +++ b/configure @@ -2530,7 +2530,7 @@ if test -z "$version_num"; then as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5 fi -if test "$version_num" != '9.6' -a "$version_num" != '10' -a "$version_num" != '11'; then +if test "$version_num" != '10' -a "$version_num" != '11'; then as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5 else { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5 diff --git a/configure.in b/configure.in index 741966245..438e46571 100644 --- a/configure.in +++ b/configure.in @@ -74,7 +74,7 @@ if test -z "$version_num"; then AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.]) fi -if test "$version_num" != '9.6' -a "$version_num" != '10' -a "$version_num" != '11'; then +if test "$version_num" != '10' -a "$version_num" != '11'; then AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.]) else AC_MSG_NOTICE([building against PostgreSQL $version_num]) diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 995a44226..0184a422c 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -1187,18 +1187,14 @@ CreateTruncateTrigger(Oid relationId) /* * RegularTable 
function returns true if given table's relation kind is RELKIND_RELATION - * (or RELKIND_PARTITIONED_TABLE for PG >= 10), otherwise it returns false. + * or RELKIND_PARTITIONED_TABLE otherwise it returns false. */ bool RegularTable(Oid relationId) { char relationKind = get_rel_relkind(relationId); -#if (PG_VERSION_NUM >= 100000) if (relationKind == RELKIND_RELATION || relationKind == RELKIND_PARTITIONED_TABLE) -#else - if (relationKind == RELKIND_RELATION) -#endif { return true; } @@ -1386,12 +1382,11 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor) /* * RelationUsesIdentityColumns returns whether a given relation uses the SQL - * GENERATED ... AS IDENTITY features supported as of PostgreSQL 10. + * GENERATED ... AS IDENTITY features introduced as of PostgreSQL 10. */ static bool RelationUsesIdentityColumns(TupleDesc relationDesc) { -#if (PG_VERSION_NUM >= 100000) int attributeIndex = 0; for (attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++) @@ -1403,7 +1398,6 @@ RelationUsesIdentityColumns(TupleDesc relationDesc) return true; } } -#endif return false; } diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index f1700c9b9..f22878f6a 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -424,7 +424,6 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag) } /* initialize copy state to read from COPY data source */ -#if (PG_VERSION_NUM >= 100000) copyState = BeginCopyFrom(NULL, copiedDistributedRelation, copyStatement->filename, @@ -432,13 +431,6 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag) NULL, copyStatement->attlist, copyStatement->options); -#else - copyState = BeginCopyFrom(copiedDistributedRelation, - copyStatement->filename, - copyStatement->is_program, - copyStatement->attlist, - copyStatement->options); -#endif /* set up callback to identify error line number */ 
errorCallback.callback = CopyFromErrorCallback; @@ -533,7 +525,6 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId) (ShardConnections *) palloc0(sizeof(ShardConnections)); /* initialize copy state to read from COPY data source */ -#if (PG_VERSION_NUM >= 100000) CopyState copyState = BeginCopyFrom(NULL, distributedRelation, copyStatement->filename, @@ -541,13 +532,6 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId) NULL, copyStatement->attlist, copyStatement->options); -#else - CopyState copyState = BeginCopyFrom(distributedRelation, - copyStatement->filename, - copyStatement->is_program, - copyStatement->attlist, - copyStatement->options); -#endif CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); copyOutState->delim = (char *) delimiterCharacter; @@ -2232,11 +2216,7 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation, copyStatement->relation = makeRangeVar(NULL, copyDest->intermediateResultIdPrefix, -1); - #if (PG_VERSION_NUM >= 100000) formatResultOption = makeDefElem("format", (Node *) makeString("result"), -1); - #else - formatResultOption = makeDefElem("format", (Node *) makeString("result")); - #endif copyStatement->options = list_make1(formatResultOption); } else @@ -2639,14 +2619,10 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS List *queryTreeList = NIL; StringInfo userFilePath = makeStringInfo(); -#if (PG_VERSION_NUM >= 100000) RawStmt *rawStmt = makeNode(RawStmt); rawStmt->stmt = queryNode; queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL); -#else - queryTreeList = pg_analyze_and_rewrite(queryNode, queryString, NULL, 0); -#endif if (list_length(queryTreeList) != 1) { diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index d85d6afc1..79fdd8888 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c 
@@ -26,9 +26,6 @@ #include "utils/relcache.h" -/* Local functions forward declarations for helper functions */ -static char * GetSchemaNameFromDropObject(ListCell *dropSchemaCell); - /* * ProcessDropSchemaStmt invalidates the foreign key cache if any table created * under dropped schema involved in any foreign key relationship. @@ -52,7 +49,9 @@ ProcessDropSchemaStmt(DropStmt *dropStatement) foreach(dropSchemaCell, dropStatement->objects) { - char *schemaString = GetSchemaNameFromDropObject(dropSchemaCell); + Value *schemaValue = (Value *) lfirst(dropSchemaCell); + char *schemaString = strVal(schemaValue); + Oid namespaceOid = get_namespace_oid(schemaString, true); if (namespaceOid == InvalidOid) @@ -135,25 +134,3 @@ PlanAlterObjectSchemaStmt(AlterObjectSchemaStmt *alterObjectSchemaStmt, return NIL; } - - -/* - * GetSchemaNameFromDropObject gets the name of the drop schema from given - * list cell. This function is defined due to API change between PG 9.6 and - * PG 10. - */ -static char * -GetSchemaNameFromDropObject(ListCell *dropSchemaCell) -{ - char *schemaString = NULL; - -#if (PG_VERSION_NUM >= 100000) - Value *schemaValue = (Value *) lfirst(dropSchemaCell); - schemaString = strVal(schemaValue); -#else - List *schemaNameList = (List *) lfirst(dropSchemaCell); - schemaString = NameListToString(schemaNameList); -#endif - - return schemaString; -} diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index fe256feeb..b507c8ca5 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -123,13 +123,12 @@ ProcessDropTableStmt(DropStmt *dropTableStatement) * CreateDistributedTable will attach it to its parent table automatically after * distributing it. * - * This function does nothing if PostgreSQL's version is less then 10 and given - * CreateStmt is not a CREATE TABLE ... PARTITION OF command. + * This function does nothing if the provided CreateStmt is not a CREATE TABLE ... 
+ * PARTITION OF command. */ void ProcessCreateTableStmtPartitionOf(CreateStmt *createStatement) { -#if (PG_VERSION_NUM >= 100000) if (createStatement->inhRelations != NIL && createStatement->partbound != NULL) { RangeVar *parentRelation = linitial(createStatement->inhRelations); @@ -161,7 +160,6 @@ ProcessCreateTableStmtPartitionOf(CreateStmt *createStatement) viaDeprecatedAPI); } } -#endif } @@ -188,13 +186,12 @@ ProcessCreateTableStmtPartitionOf(CreateStmt *createStatement) * operation will be performed via propagating this ALTER TABLE ... ATTACH * PARTITION command to workers. * - * This function does nothing if PostgreSQL's version is less then 10 and given - * CreateStmt is not a ALTER TABLE ... ATTACH PARTITION OF command. + * This function does nothing if the provided CreateStmt is not an ALTER TABLE ... + * ATTACH PARTITION OF command. */ void ProcessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement) { -#if (PG_VERSION_NUM >= 100000) List *commandList = alterTableStatement->cmds; ListCell *commandCell = NULL; @@ -240,7 +237,6 @@ ProcessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement) } } } -#endif } @@ -383,7 +379,6 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo } } } -#if (PG_VERSION_NUM >= 100000) else if (alterTableType == AT_AttachPartition) { PartitionCmd *partitionCommand = (PartitionCmd *) command->def; @@ -418,7 +413,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo rightRelationId = RangeVarGetRelid(partitionCommand->name, NoLock, false); } -#endif + executeSequentially |= SetupExecutionModeForAlterTable(leftRelationId, command); } @@ -990,7 +985,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) break; } -#if (PG_VERSION_NUM >= 100000) case AT_AttachPartition: { Oid relationId = AlterTableLookupRelation(alterTableStatement, @@ -1037,7 +1031,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) 
break; } -#endif case AT_DropConstraint: { LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); diff --git a/src/backend/distributed/commands/transmit.c b/src/backend/distributed/commands/transmit.c index f73b7029e..ea9e951b3 100644 --- a/src/backend/distributed/commands/transmit.c +++ b/src/backend/distributed/commands/transmit.c @@ -57,12 +57,8 @@ RedirectCopyDataToRegularFile(const char *filename) /* if received data has contents, append to regular file */ if (copyData->len > 0) { -#if (PG_VERSION_NUM >= 100000) int appended = FileWrite(fileDesc, copyData->data, copyData->len, PG_WAIT_IO); -#else - int appended = FileWrite(fileDesc, copyData->data, copyData->len); -#endif if (appended != copyData->len) { @@ -107,12 +103,7 @@ SendRegularFile(const char *filename) SendCopyOutStart(); -#if (PG_VERSION_NUM >= 100000) readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize, PG_WAIT_IO); -#else - readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize); -#endif - while (readBytes > 0) { fileBuffer->len = readBytes; @@ -120,12 +111,8 @@ SendRegularFile(const char *filename) SendCopyData(fileBuffer); resetStringInfo(fileBuffer); -#if (PG_VERSION_NUM >= 100000) readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize, PG_WAIT_IO); -#else - readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize); -#endif } SendCopyDone(); diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index ff6edaab4..3c05aba94 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -67,45 +67,19 @@ static void PostProcessUtility(Node *parsetree); /* - * multi_ProcessUtility9x is the 9.x-compatible wrapper for Citus' main utility - * hook. It simply adapts the old-style hook to call into the new-style (10+) - * hook, which is what now houses all actual logic. 
- */ -void -multi_ProcessUtility9x(Node *parsetree, - const char *queryString, - ProcessUtilityContext context, - ParamListInfo params, - DestReceiver *dest, - char *completionTag) -{ - PlannedStmt *plannedStmt = makeNode(PlannedStmt); - plannedStmt->commandType = CMD_UTILITY; - plannedStmt->utilityStmt = parsetree; - - multi_ProcessUtility(plannedStmt, queryString, context, params, NULL, dest, - completionTag); -} - - -/* - * CitusProcessUtility is a version-aware wrapper of ProcessUtility to account - * for argument differences between the 9.x and 10+ PostgreSQL versions. + * CitusProcessUtility is a convenience method to create a PlannedStmt out of pieces of a + * utility statement before invoking ProcessUtility. */ void CitusProcessUtility(Node *node, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, char *completionTag) { -#if (PG_VERSION_NUM >= 100000) PlannedStmt *plannedStmt = makeNode(PlannedStmt); plannedStmt->commandType = CMD_UTILITY; plannedStmt->utilityStmt = node; ProcessUtility(plannedStmt, queryString, context, params, NULL, dest, completionTag); -#else - ProcessUtility(node, queryString, context, params, dest, completionTag); -#endif } @@ -139,13 +113,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, * that state. Since we never need to intercept transaction statements, * skip our checks and immediately fall into standard_ProcessUtility. */ -#if (PG_VERSION_NUM >= 100000) standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); -#else - standard_ProcessUtility(parsetree, queryString, context, - params, dest, completionTag); -#endif return; } @@ -163,26 +132,18 @@ multi_ProcessUtility(PlannedStmt *pstmt, * Ensure that utility commands do not behave any differently until CREATE * EXTENSION is invoked. 
*/ -#if (PG_VERSION_NUM >= 100000) standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); -#else - standard_ProcessUtility(parsetree, queryString, context, - params, dest, completionTag); -#endif return; } -#if (PG_VERSION_NUM >= 100000) if (IsA(parsetree, CreateSubscriptionStmt)) { CreateSubscriptionStmt *createSubStmt = (CreateSubscriptionStmt *) parsetree; parsetree = ProcessCreateSubscriptionStmt(createSubStmt); } -#endif - #if (PG_VERSION_NUM >= 110000) if (IsA(parsetree, CallStmt)) { @@ -457,15 +418,9 @@ multi_ProcessUtility(PlannedStmt *pstmt, } } -#if (PG_VERSION_NUM >= 100000) pstmt->utilityStmt = parsetree; standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); -#else - standard_ProcessUtility(parsetree, queryString, context, - params, dest, completionTag); -#endif - /* * We only process CREATE TABLE ... PARTITION OF commands in the function below diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index fee9c9a16..6f5132c21 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -313,25 +313,6 @@ ReportResultError(MultiConnection *connection, PGresult *result, int elevel) } -/* *INDENT-OFF* */ -#if (PG_VERSION_NUM < 100000) - -/* - * Make copy of string with all trailing newline characters removed. 
- */ -char * -pchomp(const char *in) -{ - size_t n; - - n = strlen(in); - while (n > 0 && in[n - 1] == '\n') - n--; - return pnstrdup(in, n); -} - -#endif - /* *INDENT-ON* */ @@ -712,12 +693,7 @@ FinishConnectionIO(MultiConnection *connection, bool raiseInterrupts) return true; } -#if (PG_VERSION_NUM >= 100000) rc = WaitLatchOrSocket(MyLatch, waitFlags, socket, 0, PG_WAIT_EXTENSION); -#else - rc = WaitLatchOrSocket(MyLatch, waitFlags, socket, 0); -#endif - if (rc & WL_POSTMASTER_DEATH) { ereport(ERROR, (errmsg("postmaster was shut down, exiting"))); @@ -806,10 +782,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) int pendingConnectionCount = totalConnectionCount - pendingConnectionsStartIndex; - /* - * We cannot disable wait events as of postgres 9.6, so we rebuild the - * WaitEventSet whenever connections are ready. - */ + /* rebuild the WaitEventSet whenever connections are ready */ if (rebuildWaitEventSet) { if (waitEventSet != NULL) @@ -824,13 +797,8 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) } /* wait for I/O events */ -#if (PG_VERSION_NUM >= 100000) eventCount = WaitEventSetWait(waitEventSet, timeout, events, pendingConnectionCount, WAIT_EVENT_CLIENT_READ); -#else - eventCount = WaitEventSetWait(waitEventSet, timeout, events, - pendingConnectionCount); -#endif /* process I/O events */ for (; eventIndex < eventCount; eventIndex++) diff --git a/src/backend/distributed/executor/intermediate_results.c b/src/backend/distributed/executor/intermediate_results.c index 2198f8fb5..8827fa79a 100644 --- a/src/backend/distributed/executor/intermediate_results.c +++ b/src/backend/distributed/executor/intermediate_results.c @@ -412,11 +412,7 @@ RemoteFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest) static void WriteToLocalFile(StringInfo copyData, File fileDesc) { -#if (PG_VERSION_NUM >= 100000) int bytesWritten = FileWrite(fileDesc, copyData->data, copyData->len, PG_WAIT_IO); -#else - int bytesWritten = 
FileWrite(fileDesc, copyData->data, copyData->len); -#endif if (bytesWritten < 0) { ereport(ERROR, (errcode_for_file_access(), diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index 172cf4598..0b8073a95 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -270,21 +270,12 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript DefElem *copyOption = NULL; List *copyOptions = NIL; -#if (PG_VERSION_NUM >= 100000) int location = -1; /* "unknown" token location */ copyOption = makeDefElem("format", (Node *) makeString(copyFormat), location); -#else - copyOption = makeDefElem("format", (Node *) makeString(copyFormat)); -#endif copyOptions = lappend(copyOptions, copyOption); -#if (PG_VERSION_NUM >= 100000) copyState = BeginCopyFrom(NULL, stubRelation, fileName, false, NULL, NULL, copyOptions); -#else - copyState = BeginCopyFrom(stubRelation, fileName, false, NULL, - copyOptions); -#endif while (true) { @@ -351,14 +342,8 @@ Query * ParseQueryString(const char *queryString) { Query *query = NULL; - -#if (PG_VERSION_NUM >= 100000) RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString); List *queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL); -#else - Node *queryTreeNode = ParseTreeNode(queryString); - List *queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0); -#endif if (list_length(queryTreeList) != 1) { @@ -416,11 +401,7 @@ ExecutePlanIntoDestReceiver(PlannedStmt *queryPlan, ParamListInfo params, NULL); PortalStart(portal, params, eflags, GetActiveSnapshot()); -#if (PG_VERSION_NUM >= 100000) PortalRun(portal, count, false, true, dest, dest, NULL); -#else - PortalRun(portal, count, false, dest, dest, NULL); -#endif PortalDrop(portal, false); } diff --git a/src/backend/distributed/master/master_delete_protocol.c 
b/src/backend/distributed/master/master_delete_protocol.c index ce39f1cef..e8c26469b 100644 --- a/src/backend/distributed/master/master_delete_protocol.c +++ b/src/backend/distributed/master/master_delete_protocol.c @@ -58,9 +58,7 @@ #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/lsyscache.h" -#if (PG_VERSION_NUM >= 100000) #include "utils/varlena.h" -#endif /* Local functions forward declarations */ @@ -112,12 +110,8 @@ master_apply_delete_command(PG_FUNCTION_ARGS) LOCKMODE lockMode = 0; char partitionMethod = 0; bool failOK = false; -#if (PG_VERSION_NUM >= 100000) RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString); queryTreeNode = rawStmt->stmt; -#else - queryTreeNode = ParseTreeNode(queryString); -#endif EnsureCoordinator(); CheckCitusVersion(ERROR); @@ -152,11 +146,7 @@ master_apply_delete_command(PG_FUNCTION_ARGS) CheckDistributedTable(relationId); EnsureTablePermissions(relationId, ACL_DELETE); -#if (PG_VERSION_NUM >= 100000) queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL); -#else - queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0); -#endif deleteQuery = (Query *) linitial(queryTreeList); CheckTableCount(deleteQuery); @@ -593,11 +583,7 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList, restrictInfoList = lappend(restrictInfoList, lessThanRestrictInfo); restrictInfoList = lappend(restrictInfoList, greaterThanRestrictInfo); -#if (PG_VERSION_NUM >= 100000) dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList, false); -#else - dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList); -#endif if (dropShard) { dropShardIntervalList = lappend(dropShardIntervalList, shardInterval); diff --git a/src/backend/distributed/master/master_modify_multiple_shards.c b/src/backend/distributed/master/master_modify_multiple_shards.c index 2f1104472..721801c8f 100644 --- a/src/backend/distributed/master/master_modify_multiple_shards.c +++ 
b/src/backend/distributed/master/master_modify_multiple_shards.c @@ -92,12 +92,8 @@ master_modify_multiple_shards(PG_FUNCTION_ARGS) CmdType operation = CMD_UNKNOWN; TaskType taskType = TASK_TYPE_INVALID_FIRST; bool truncateOperation = false; -#if (PG_VERSION_NUM >= 100000) RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString); queryTreeNode = rawStmt->stmt; -#else - queryTreeNode = ParseTreeNode(queryString); -#endif CheckCitusVersion(ERROR); @@ -152,11 +148,7 @@ master_modify_multiple_shards(PG_FUNCTION_ARGS) CheckDistributedTable(relationId); -#if (PG_VERSION_NUM >= 100000) queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL); -#else - queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0); -#endif modifyQuery = (Query *) linitial(queryTreeList); operation = modifyQuery->commandType; diff --git a/src/backend/distributed/master/master_node_protocol.c b/src/backend/distributed/master/master_node_protocol.c index 145edc881..8ddaf833c 100644 --- a/src/backend/distributed/master/master_node_protocol.c +++ b/src/backend/distributed/master/master_node_protocol.c @@ -61,9 +61,7 @@ #include "utils/relcache.h" #include "utils/ruleutils.h" #include "utils/tqual.h" -#if (PG_VERSION_NUM >= 100000) #include "utils/varlena.h" -#endif /* Shard related configuration */ diff --git a/src/backend/distributed/master/master_stage_protocol.c b/src/backend/distributed/master/master_stage_protocol.c index 07bb2f0c7..fa06c4515 100644 --- a/src/backend/distributed/master/master_stage_protocol.c +++ b/src/backend/distributed/master/master_stage_protocol.c @@ -24,9 +24,7 @@ #include "commands/tablecmds.h" #include "catalog/indexing.h" #include "catalog/namespace.h" -#if (PG_VERSION_NUM >= 100000) #include "catalog/partition.h" -#endif #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" diff --git a/src/backend/distributed/master/worker_node_manager.c 
b/src/backend/distributed/master/worker_node_manager.c index d4855f956..b8fc43e6f 100644 --- a/src/backend/distributed/master/worker_node_manager.c +++ b/src/backend/distributed/master/worker_node_manager.c @@ -21,8 +21,6 @@ #include "libpq/hba.h" #if (PG_VERSION_NUM >= 100000) #include "common/ip.h" -#else -#include "libpq/ip.h" #endif #include "libpq/libpq-be.h" #include "postmaster/postmaster.h" diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index babbec86d..418440f09 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -918,11 +918,7 @@ List * SequenceDDLCommandsForTable(Oid relationId) { List *sequenceDDLList = NIL; -#if (PG_VERSION_NUM >= 100000) List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber); -#else - List *ownedSequences = getOwnedSequences(relationId); -#endif ListCell *listCell; char *ownerName = TableOwner(relationId); @@ -1008,7 +1004,6 @@ EnsureSupportedSequenceColumnType(Oid sequenceOid) bool hasMetadataWorkers = HasMetadataWorkers(); /* call sequenceIsOwned in order to get the tableId and columnId */ -#if (PG_VERSION_NUM >= 100000) bool sequenceOwned = sequenceIsOwned(sequenceOid, DEPENDENCY_AUTO, &tableId, &columnId); if (!sequenceOwned) @@ -1018,9 +1013,6 @@ EnsureSupportedSequenceColumnType(Oid sequenceOid) } Assert(sequenceOwned); -#else - sequenceIsOwned(sequenceOid, &tableId, &columnId); -#endif shouldSyncMetadata = ShouldSyncTableMetadata(tableId); diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 2fd96b5e1..f8ca33704 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -337,7 +337,6 @@ AdjustPartitioningForDistributedPlanning(Query *queryTree, { rangeTableEntry->inh = setPartitionedTablesInherited; -#if (PG_VERSION_NUM >= 100000) if 
(setPartitionedTablesInherited) { rangeTableEntry->relkind = RELKIND_PARTITIONED_TABLE; @@ -346,7 +345,6 @@ AdjustPartitioningForDistributedPlanning(Query *queryTree, { rangeTableEntry->relkind = RELKIND_RELATION; } -#endif } } } diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index bf89dc043..8b92b8578 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -87,15 +87,10 @@ static void ExplainTaskPlacement(ShardPlacement *taskPlacement, List *explainOut static StringInfo BuildRemoteExplainQuery(char *queryString, ExplainState *es); /* Static Explain functions copied from explain.c */ -#if (PG_VERSION_NUM >= 100000) static void ExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv); -#else -static void ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es, - const char *queryString, ParamListInfo params); -#endif #if (PG_VERSION_NUM < 110000) static void ExplainOpenGroup(const char *objtype, const char *labelname, bool labeled, ExplainState *es); @@ -165,11 +160,7 @@ CoordinatorInsertSelectExplainScan(CustomScanState *node, List *ancestors, ExplainOpenGroup("Select Query", "Select Query", false, es); /* explain the inner SELECT query */ -#if (PG_VERSION_NUM >= 100000) ExplainOneQuery(query, 0, into, es, queryString, params, NULL); -#else - ExplainOneQuery(query, into, es, queryString, params); -#endif ExplainCloseGroup("Select Query", "Select Query", false, es); } @@ -211,11 +202,7 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es) INSTR_TIME_SET_CURRENT(planduration); INSTR_TIME_SUBTRACT(planduration, planduration); -#if (PG_VERSION_NUM >= 100000) ExplainOnePlan(plan, into, es, queryString, params, NULL, &planduration); -#else - ExplainOnePlan(plan, into, es, queryString, params, &planduration); -#endif if 
(es->format == EXPLAIN_FORMAT_TEXT) { @@ -654,15 +641,10 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es) * "into" is NULL unless we are explaining the contents of a CreateTableAsStmt. */ static void -#if (PG_VERSION_NUM >= 100000) ExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv) -#else -ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es, - const char *queryString, ParamListInfo params) -#endif { /* if an advisor plugin is present, let it manage things */ if (ExplainOneQuery_hook) @@ -672,8 +654,6 @@ ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es, #elif (PG_VERSION_NUM >= 100000) (*ExplainOneQuery_hook) (query, cursorOptions, into, es, queryString, params); -#else - (*ExplainOneQuery_hook) (query, into, es, queryString, params); #endif else { @@ -684,22 +664,14 @@ ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es, INSTR_TIME_SET_CURRENT(planstart); /* plan the query */ -#if (PG_VERSION_NUM >= 100000) plan = pg_plan_query(query, cursorOptions, params); -#else - plan = pg_plan_query(query, into ? 0 : CURSOR_OPT_PARALLEL_OK, params); -#endif INSTR_TIME_SET_CURRENT(planduration); INSTR_TIME_SUBTRACT(planduration, planstart); /* run it (if needed) and produce output */ -#if (PG_VERSION_NUM >= 100000) ExplainOnePlan(plan, into, es, queryString, params, queryEnv, &planduration); -#else - ExplainOnePlan(plan, into, es, queryString, params, &planduration); -#endif } } diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index 5bb255eed..a568c4943 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -1953,12 +1953,8 @@ MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType, * will convert the types of the aggregates if necessary. 
*/ operatorNameList = list_make1(makeString(DIVISION_OPER_NAME)); -#if (PG_VERSION_NUM >= 100000) opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, NULL, -1); -#else - opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, -1); -#endif return opExpr; } diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index 92d3ea3d0..e1532c6a5 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -142,9 +142,7 @@ static bool MultiRouterPlannableQuery(Query *query, static DeferredErrorMessage * ErrorIfQueryHasModifyingCTE(Query *queryTree); static RangeTblEntry * GetUpdateOrDeleteRTE(Query *query); static bool SelectsFromDistributedTable(List *rangeTableList, Query *query); -#if (PG_VERSION_NUM >= 100000) static List * get_all_actual_clauses(List *restrictinfo_list); -#endif static int CompareInsertValuesByShardId(const void *leftElement, const void *rightElement); static uint64 GetInitialShardId(List *relationShardList); @@ -1294,13 +1292,8 @@ TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTre rightConst->constisnull = newValue->constisnull; rightConst->constbyval = newValue->constbyval; -#if (PG_VERSION_NUM >= 100000) predicateIsImplied = predicate_implied_by(list_make1(equalityExpr), restrictClauseList, false); -#else - predicateIsImplied = predicate_implied_by(list_make1(equalityExpr), - restrictClauseList); -#endif if (predicateIsImplied) { /* target entry of the form SET col = WHERE col = AND ... 
*/ @@ -2518,13 +2511,10 @@ NormalizeMultiRowInsertTargetList(Query *query) valuesListCell->data.ptr_value = (void *) expandedValuesList; } -#if (PG_VERSION_NUM >= 100000) - /* reset coltypes, coltypmods, colcollations and rebuild them below */ valuesRTE->coltypes = NIL; valuesRTE->coltypmods = NIL; valuesRTE->colcollations = NIL; -#endif foreach(targetEntryCell, query->targetList) { @@ -2544,11 +2534,9 @@ NormalizeMultiRowInsertTargetList(Query *query) targetTypmod = exprTypmod(targetExprNode); targetColl = exprCollation(targetExprNode); -#if (PG_VERSION_NUM >= 100000) valuesRTE->coltypes = lappend_oid(valuesRTE->coltypes, targetType); valuesRTE->coltypmods = lappend_int(valuesRTE->coltypmods, targetTypmod); valuesRTE->colcollations = lappend_oid(valuesRTE->colcollations, targetColl); -#endif if (IsA(targetExprNode, Var)) { @@ -2996,8 +2984,6 @@ ErrorIfQueryHasModifyingCTE(Query *queryTree) } -#if (PG_VERSION_NUM >= 100000) - /* * get_all_actual_clauses * @@ -3024,9 +3010,6 @@ get_all_actual_clauses(List *restrictinfo_list) } -#endif - - /* * CompareInsertValuesByShardId does what it says in the name. Used for sorting * InsertValues objects by their shard. 
diff --git a/src/backend/distributed/relay/relay_event_utility.c b/src/backend/distributed/relay/relay_event_utility.c index 7fa2ea1c7..2bd597575 100644 --- a/src/backend/distributed/relay/relay_event_utility.c +++ b/src/backend/distributed/relay/relay_event_utility.c @@ -547,7 +547,6 @@ RelayEventExtendNamesForInterShardCommands(Node *parseTree, uint64 leftShardId, } } } -#if (PG_VERSION_NUM >= 100000) else if (command->subtype == AT_AttachPartition || command->subtype == AT_DetachPartition) { @@ -556,7 +555,6 @@ RelayEventExtendNamesForInterShardCommands(Node *parseTree, uint64 leftShardId, referencedTableName = &(partitionCommand->name->relname); relationSchemaName = &(partitionCommand->name->schemaname); } -#endif else { continue; diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 715f19eff..92f2d6ae2 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -203,11 +203,7 @@ _PG_init(void) planner_hook = distributed_planner; /* register utility hook */ -#if (PG_VERSION_NUM >= 100000) ProcessUtility_hook = multi_ProcessUtility; -#else - ProcessUtility_hook = multi_ProcessUtility9x; -#endif /* register for planner hook */ set_rel_pathlist_hook = multi_relation_restriction_hook; diff --git a/src/backend/distributed/test/deparse_shard_query.c b/src/backend/distributed/test/deparse_shard_query.c index 7c3f8eeda..bae0bc065 100644 --- a/src/backend/distributed/test/deparse_shard_query.c +++ b/src/backend/distributed/test/deparse_shard_query.c @@ -52,12 +52,8 @@ deparse_shard_query_test(PG_FUNCTION_ARGS) ListCell *queryTreeCell = NULL; List *queryTreeList = NIL; -#if (PG_VERSION_NUM >= 100000) queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar, NULL, 0, NULL); -#else - queryTreeList = pg_analyze_and_rewrite(parsetree, queryStringChar, NULL, 0); -#endif foreach(queryTreeCell, queryTreeList) { diff --git 
a/src/backend/distributed/test/distribution_metadata.c b/src/backend/distributed/test/distribution_metadata.c index 8702b863c..4b959a435 100644 --- a/src/backend/distributed/test/distribution_metadata.c +++ b/src/backend/distributed/test/distribution_metadata.c @@ -271,12 +271,8 @@ relation_count_in_query(PG_FUNCTION_ARGS) ListCell *queryTreeCell = NULL; List *queryTreeList = NIL; -#if (PG_VERSION_NUM >= 100000) queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar, NULL, 0, NULL); -#else - queryTreeList = pg_analyze_and_rewrite(parsetree, queryStringChar, NULL, 0); -#endif foreach(queryTreeCell, queryTreeList) { diff --git a/src/backend/distributed/test/partitioning_utils.c b/src/backend/distributed/test/partitioning_utils.c index ca038e93a..f13b3e885 100644 --- a/src/backend/distributed/test/partitioning_utils.c +++ b/src/backend/distributed/test/partitioning_utils.c @@ -37,9 +37,7 @@ generate_alter_table_detach_partition_command(PG_FUNCTION_ARGS) { char *command = ""; -#if (PG_VERSION_NUM >= 100000) command = GenerateDetachPartitionCommand(PG_GETARG_OID(0)); -#endif PG_RETURN_TEXT_P(cstring_to_text(command)); } @@ -53,9 +51,7 @@ generate_alter_table_attach_partition_command(PG_FUNCTION_ARGS) { char *command = ""; -#if (PG_VERSION_NUM >= 100000) command = GenerateAlterTableAttachPartitionCommand(PG_GETARG_OID(0)); -#endif PG_RETURN_TEXT_P(cstring_to_text(command)); } @@ -69,9 +65,7 @@ generate_partition_information(PG_FUNCTION_ARGS) { char *command = ""; -#if (PG_VERSION_NUM >= 100000) command = GeneratePartitioningInformation(PG_GETARG_OID(0)); -#endif PG_RETURN_TEXT_P(cstring_to_text(command)); } @@ -85,7 +79,6 @@ print_partitions(PG_FUNCTION_ARGS) { StringInfo resultRelationNames = makeStringInfo(); -#if (PG_VERSION_NUM >= 100000) List *partitionList = PartitionList(PG_GETARG_OID(0)); ListCell *partitionOidCell = NULL; @@ -103,7 +96,6 @@ print_partitions(PG_FUNCTION_ARGS) appendStringInfoString(resultRelationNames, 
get_rel_name(partitionOid)); } -#endif PG_RETURN_TEXT_P(cstring_to_text(resultRelationNames->data)); } diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index 4c8def6fc..0ffb6d175 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -46,11 +46,7 @@ typedef struct BackendManagementShmemData { int trancheId; -#if (PG_VERSION_NUM >= 100000) NamedLWLockTranche namedLockTranche; -#else - LWLockTranche lockTranche; -#endif LWLock lock; /* @@ -554,36 +550,18 @@ BackendManagementShmemInit(void) int totalProcs = 0; char *trancheName = "Backend Management Tranche"; -#if (PG_VERSION_NUM >= 100000) NamedLWLockTranche *namedLockTranche = &backendManagementShmemData->namedLockTranche; -#else - LWLockTranche *lockTranche = &backendManagementShmemData->lockTranche; -#endif - /* start by zeroing out all the memory */ memset(backendManagementShmemData, 0, BackendManagementShmemSize()); -#if (PG_VERSION_NUM >= 100000) namedLockTranche->trancheId = LWLockNewTrancheId(); LWLockRegisterTranche(namedLockTranche->trancheId, trancheName); LWLockInitialize(&backendManagementShmemData->lock, namedLockTranche->trancheId); -#else - backendManagementShmemData->trancheId = LWLockNewTrancheId(); - - /* we only need a single lock */ - lockTranche->array_base = &backendManagementShmemData->lock; - lockTranche->array_stride = sizeof(LWLock); - lockTranche->name = trancheName; - - LWLockRegisterTranche(backendManagementShmemData->trancheId, lockTranche); - LWLockInitialize(&backendManagementShmemData->lock, - backendManagementShmemData->trancheId); -#endif /* start the distributed transaction ids from 1 */ pg_atomic_init_u64(&backendManagementShmemData->nextTransactionNumber, 1); diff --git a/src/backend/distributed/transaction/citus_dist_stat_activity.c b/src/backend/distributed/transaction/citus_dist_stat_activity.c index 8bbbd42a2..03157c4bd 100644 --- 
a/src/backend/distributed/transaction/citus_dist_stat_activity.c +++ b/src/backend/distributed/transaction/citus_dist_stat_activity.c @@ -36,9 +36,7 @@ #include "storage/spin.h" #include "storage/s_lock.h" #include "utils/builtins.h" -#if PG_VERSION_NUM >= 100000 #include "utils/fmgrprotos.h" -#endif #include "utils/inet.h" #include "utils/timestamp.h" @@ -121,153 +119,77 @@ * We get the query_host_name and query_host_port while opening the connection to * the node. We also replace initiator_node_identifier with initiator_node_host * and initiator_node_port. Thus, they are not in the query below. - * - * Also, backend_type introduced with pg 10+ so we have null in the previous verions. */ -#if PG_VERSION_NUM >= 100000 - - #define CITUS_DIST_STAT_ACTIVITY_QUERY \ +#define CITUS_DIST_STAT_ACTIVITY_QUERY \ "\ - SELECT \ - dist_txs.initiator_node_identifier, \ - dist_txs.transaction_number, \ - dist_txs.transaction_stamp, \ - pg_stat_activity.datid, \ - pg_stat_activity.datname, \ - pg_stat_activity.pid, \ - pg_stat_activity.usesysid, \ - pg_stat_activity.usename, \ - pg_stat_activity.application_name, \ - pg_stat_activity.client_addr, \ - pg_stat_activity.client_hostname, \ - pg_stat_activity.client_port, \ - pg_stat_activity.backend_start, \ - pg_stat_activity.xact_start, \ - pg_stat_activity.query_start, \ - pg_stat_activity.state_change, \ - pg_stat_activity.wait_event_type, \ - pg_stat_activity.wait_event, \ - pg_stat_activity.state, \ - pg_stat_activity.backend_xid, \ - pg_stat_activity.backend_xmin, \ - pg_stat_activity.query, \ - pg_stat_activity.backend_type \ - FROM \ - pg_stat_activity \ - INNER JOIN \ - get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \ - ON pg_stat_activity.pid = dist_txs.process_id \ - WHERE \ - dist_txs.worker_query = false;" +SELECT \ + dist_txs.initiator_node_identifier, \ + dist_txs.transaction_number, \ + dist_txs.transaction_stamp, \ + 
pg_stat_activity.datid, \ + pg_stat_activity.datname, \ + pg_stat_activity.pid, \ + pg_stat_activity.usesysid, \ + pg_stat_activity.usename, \ + pg_stat_activity.application_name, \ + pg_stat_activity.client_addr, \ + pg_stat_activity.client_hostname, \ + pg_stat_activity.client_port, \ + pg_stat_activity.backend_start, \ + pg_stat_activity.xact_start, \ + pg_stat_activity.query_start, \ + pg_stat_activity.state_change, \ + pg_stat_activity.wait_event_type, \ + pg_stat_activity.wait_event, \ + pg_stat_activity.state, \ + pg_stat_activity.backend_xid, \ + pg_stat_activity.backend_xmin, \ + pg_stat_activity.query, \ + pg_stat_activity.backend_type \ +FROM \ + pg_stat_activity \ + INNER JOIN \ + get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \ + ON pg_stat_activity.pid = dist_txs.process_id \ +WHERE \ + dist_txs.worker_query = false;" - #define CITUS_WORKER_STAT_ACTIVITY_QUERY \ +#define CITUS_WORKER_STAT_ACTIVITY_QUERY \ "\ - SELECT \ - dist_txs.initiator_node_identifier, \ - dist_txs.transaction_number, \ - dist_txs.transaction_stamp, \ - pg_stat_activity.datid, \ - pg_stat_activity.datname, \ - pg_stat_activity.pid, \ - pg_stat_activity.usesysid, \ - pg_stat_activity.usename, \ - pg_stat_activity.application_name, \ - pg_stat_activity.client_addr, \ - pg_stat_activity.client_hostname, \ - pg_stat_activity.client_port, \ - pg_stat_activity.backend_start, \ - pg_stat_activity.xact_start, \ - pg_stat_activity.query_start, \ - pg_stat_activity.state_change, \ - pg_stat_activity.wait_event_type, \ - pg_stat_activity.wait_event, \ - pg_stat_activity.state, \ - pg_stat_activity.backend_xid, \ - pg_stat_activity.backend_xmin, \ - pg_stat_activity.query, \ - pg_stat_activity.backend_type \ - FROM \ - pg_stat_activity \ - LEFT JOIN \ - get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, 
transaction_stamp) \ - ON pg_stat_activity.pid = dist_txs.process_id \ - WHERE \ - pg_stat_activity.application_name = 'citus' \ - AND \ - pg_stat_activity.query NOT ILIKE '%stat_activity%';" -#else - #define CITUS_DIST_STAT_ACTIVITY_QUERY \ - "\ - SELECT \ - dist_txs.initiator_node_identifier, \ - dist_txs.transaction_number, \ - dist_txs.transaction_stamp, \ - pg_stat_activity.datid, \ - pg_stat_activity.datname, \ - pg_stat_activity.pid, \ - pg_stat_activity.usesysid, \ - pg_stat_activity.usename, \ - pg_stat_activity.application_name, \ - pg_stat_activity.client_addr, \ - pg_stat_activity.client_hostname, \ - pg_stat_activity.client_port, \ - pg_stat_activity.backend_start, \ - pg_stat_activity.xact_start, \ - pg_stat_activity.query_start, \ - pg_stat_activity.state_change, \ - pg_stat_activity.wait_event_type, \ - pg_stat_activity.wait_event, \ - pg_stat_activity.state, \ - pg_stat_activity.backend_xid, \ - pg_stat_activity.backend_xmin, \ - pg_stat_activity.query, \ - null \ - FROM \ - pg_stat_activity \ - INNER JOIN \ - get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \ - ON pg_stat_activity.pid = dist_txs.process_id \ - WHERE \ - dist_txs.worker_query = false;" - - #define CITUS_WORKER_STAT_ACTIVITY_QUERY \ - "\ - SELECT \ - dist_txs.initiator_node_identifier, \ - dist_txs.transaction_number, \ - dist_txs.transaction_stamp, \ - pg_stat_activity.datid, \ - pg_stat_activity.datname, \ - pg_stat_activity.pid, \ - pg_stat_activity.usesysid, \ - pg_stat_activity.usename, \ - pg_stat_activity.application_name, \ - pg_stat_activity.client_addr, \ - pg_stat_activity.client_hostname, \ - pg_stat_activity.client_port, \ - pg_stat_activity.backend_start, \ - pg_stat_activity.xact_start, \ - pg_stat_activity.query_start, \ - pg_stat_activity.state_change, \ - pg_stat_activity.wait_event_type, \ - pg_stat_activity.wait_event, \ - pg_stat_activity.state, \ - 
pg_stat_activity.backend_xid, \ - pg_stat_activity.backend_xmin, \ - pg_stat_activity.query, \ - null \ - FROM \ - pg_stat_activity \ - LEFT JOIN \ - get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \ - ON pg_stat_activity.pid = dist_txs.process_id \ - WHERE \ - pg_stat_activity.application_name = 'citus' \ - AND \ - pg_stat_activity.query NOT ILIKE '%stat_activity%';" - -#endif +SELECT \ + dist_txs.initiator_node_identifier, \ + dist_txs.transaction_number, \ + dist_txs.transaction_stamp, \ + pg_stat_activity.datid, \ + pg_stat_activity.datname, \ + pg_stat_activity.pid, \ + pg_stat_activity.usesysid, \ + pg_stat_activity.usename, \ + pg_stat_activity.application_name, \ + pg_stat_activity.client_addr, \ + pg_stat_activity.client_hostname, \ + pg_stat_activity.client_port, \ + pg_stat_activity.backend_start, \ + pg_stat_activity.xact_start, \ + pg_stat_activity.query_start, \ + pg_stat_activity.state_change, \ + pg_stat_activity.wait_event_type, \ + pg_stat_activity.wait_event, \ + pg_stat_activity.state, \ + pg_stat_activity.backend_xid, \ + pg_stat_activity.backend_xmin, \ + pg_stat_activity.query, \ + pg_stat_activity.backend_type \ +FROM \ + pg_stat_activity \ + LEFT JOIN \ + get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \ + ON pg_stat_activity.pid = dist_txs.process_id \ +WHERE \ + pg_stat_activity.application_name = 'citus' \ + AND \ + pg_stat_activity.query NOT ILIKE '%stat_activity%';" typedef struct CitusDistStat { diff --git a/src/backend/distributed/utils/citus_clauses.c b/src/backend/distributed/utils/citus_clauses.c index 4003b3c8f..bb9693298 100644 --- a/src/backend/distributed/utils/citus_clauses.c +++ b/src/backend/distributed/utils/citus_clauses.c @@ -189,11 +189,7 @@ citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod, /* * And 
evaluate it. */ -#if (PG_VERSION_NUM >= 100000) const_val = ExecEvalExprSwitchContext(exprstate, econtext, &const_is_null); -#else - const_val = ExecEvalExprSwitchContext(exprstate, econtext, &const_is_null, NULL); -#endif /* Get info needed about result datatype */ get_typlenbyval(result_type, &resultTypLen, &resultTypByVal); @@ -259,13 +255,11 @@ CitusIsVolatileFunction(Node *node) return true; } -#if (PG_VERSION_NUM >= 100000) if (IsA(node, NextValueExpr)) { /* NextValueExpr is volatile */ return true; } -#endif return false; } @@ -302,7 +296,6 @@ CitusIsMutableFunction(Node *node) return true; } -#if (PG_VERSION_NUM >= 100000) if (IsA(node, SQLValueFunction)) { /* all variants of SQLValueFunction are stable */ @@ -314,7 +307,6 @@ CitusIsMutableFunction(Node *node) /* NextValueExpr is volatile */ return true; } -#endif return false; } diff --git a/src/backend/distributed/utils/citus_nodefuncs.c b/src/backend/distributed/utils/citus_nodefuncs.c index f5f65ff12..ce0aa9f56 100644 --- a/src/backend/distributed/utils/citus_nodefuncs.c +++ b/src/backend/distributed/utils/citus_nodefuncs.c @@ -302,10 +302,8 @@ GetRangeTblKind(RangeTblEntry *rte) switch (rte->rtekind) { /* directly rtekind if it's not possibly an extended RTE */ -#if (PG_VERSION_NUM >= 100000) case RTE_TABLEFUNC: case RTE_NAMEDTUPLESTORE: -#endif case RTE_RELATION: case RTE_SUBQUERY: case RTE_JOIN: diff --git a/src/backend/distributed/utils/citus_ruleutils.c b/src/backend/distributed/utils/citus_ruleutils.c index bbe5707ce..7bc91c580 100644 --- a/src/backend/distributed/utils/citus_ruleutils.c +++ b/src/backend/distributed/utils/citus_ruleutils.c @@ -204,17 +204,10 @@ pg_get_sequencedef_string(Oid sequenceRelationId) /* build our DDL command */ qualifiedSequenceName = generate_relation_name(sequenceRelationId, NIL); -#if (PG_VERSION_NUM >= 100000) sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName, pgSequenceForm->seqincrement, pgSequenceForm->seqmin, pgSequenceForm->seqmax, 
pgSequenceForm->seqstart, pgSequenceForm->seqcycle ? "" : "NO "); -#else - sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName, - pgSequenceForm->increment_by, pgSequenceForm->min_value, - pgSequenceForm->max_value, pgSequenceForm->start_value, - pgSequenceForm->is_cycled ? "" : "NO "); -#endif return sequenceDef; } @@ -230,7 +223,6 @@ pg_get_sequencedef(Oid sequenceRelationId) Form_pg_sequence pgSequenceForm = NULL; HeapTuple heapTuple = NULL; -#if (PG_VERSION_NUM >= 100000) heapTuple = SearchSysCache1(SEQRELID, sequenceRelationId); if (!HeapTupleIsValid(heapTuple)) { @@ -240,38 +232,6 @@ pg_get_sequencedef(Oid sequenceRelationId) pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple); ReleaseSysCache(heapTuple); -#else - SysScanDesc scanDescriptor = NULL; - Relation sequenceRel = NULL; - AclResult permissionCheck = ACLCHECK_NO_PRIV; - - /* open and lock sequence */ - sequenceRel = heap_open(sequenceRelationId, AccessShareLock); - - /* check permissions to read sequence attributes */ - permissionCheck = pg_class_aclcheck(sequenceRelationId, GetUserId(), - ACL_SELECT | ACL_USAGE); - if (permissionCheck != ACLCHECK_OK) - { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied for sequence %s", - RelationGetRelationName(sequenceRel)))); - } - - /* retrieve attributes from first tuple */ - scanDescriptor = systable_beginscan(sequenceRel, InvalidOid, false, NULL, 0, NULL); - heapTuple = systable_getnext(scanDescriptor); - if (!HeapTupleIsValid(heapTuple)) - { - ereport(ERROR, (errmsg("could not find specified sequence"))); - } - - pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple); - - systable_endscan(scanDescriptor); - - heap_close(sequenceRel, AccessShareLock); -#endif return pgSequenceForm; } @@ -474,13 +434,11 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults) appendStringInfo(&buffer, " SERVER %s", quote_identifier(serverName)); AppendOptionListToString(&buffer, 
foreignTable->options); } -#if (PG_VERSION_NUM >= 100000) else if (relationKind == RELKIND_PARTITIONED_TABLE) { char *partitioningInformation = GeneratePartitioningInformation(tableRelationId); appendStringInfo(&buffer, " PARTITION BY %s ", partitioningInformation); } -#endif /* * Add any reloptions (storage parameters) defined on the table in a WITH diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c index 605ec81e8..4769834c5 100644 --- a/src/backend/distributed/utils/maintenanced.c +++ b/src/backend/distributed/utils/maintenanced.c @@ -60,11 +60,7 @@ typedef struct MaintenanceDaemonControlData * data in MaintenanceDaemonDBHash. */ int trancheId; -#if (PG_VERSION_NUM >= 100000) char *lockTrancheName; -#else - LWLockTranche lockTranche; -#endif LWLock lock; } MaintenanceDaemonControlData; @@ -463,11 +459,7 @@ CitusMaintenanceDaemonMain(Datum main_arg) * Wait until timeout, or until somebody wakes us up. Also cast the timeout to * integer where we've calculated it using double for not losing the precision. 
*/ -#if (PG_VERSION_NUM >= 100000) rc = WaitLatch(MyLatch, latchFlags, (long) timeout, PG_WAIT_EXTENSION); -#else - rc = WaitLatch(MyLatch, latchFlags, (long) timeout); -#endif /* emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) @@ -553,26 +545,10 @@ MaintenanceDaemonShmemInit(void) */ if (!alreadyInitialized) { -#if (PG_VERSION_NUM >= 100000) MaintenanceDaemonControl->trancheId = LWLockNewTrancheId(); MaintenanceDaemonControl->lockTrancheName = "Citus Maintenance Daemon"; LWLockRegisterTranche(MaintenanceDaemonControl->trancheId, MaintenanceDaemonControl->lockTrancheName); -#else - - /* initialize lwlock */ - LWLockTranche *tranche = &MaintenanceDaemonControl->lockTranche; - - /* start by zeroing out all the memory */ - memset(MaintenanceDaemonControl, 0, MaintenanceDaemonShmemSize()); - - /* initialize lock */ - MaintenanceDaemonControl->trancheId = LWLockNewTrancheId(); - tranche->array_base = &MaintenanceDaemonControl->lock; - tranche->array_stride = sizeof(LWLock); - tranche->name = "Citus Maintenance Daemon"; - LWLockRegisterTranche(MaintenanceDaemonControl->trancheId, tranche); -#endif LWLockInitialize(&MaintenanceDaemonControl->lock, MaintenanceDaemonControl->trancheId); diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index 28b276959..b03a6cf08 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -10,9 +10,7 @@ #include "access/heapam.h" #include "access/htup_details.h" #include "catalog/indexing.h" -#if (PG_VERSION_NUM >= 100000) #include "catalog/partition.h" -#endif #include "catalog/pg_class.h" #include "catalog/pg_inherits.h" #if (PG_VERSION_NUM < 110000) @@ -33,9 +31,7 @@ #include "utils/syscache.h" -#if (PG_VERSION_NUM >= 100000) static char * PartitionBound(Oid partitionId); -#endif /* @@ -47,12 +43,10 @@ PartitionedTable(Oid relationId) Relation rel = 
heap_open(relationId, AccessShareLock); bool partitionedTable = false; -#if (PG_VERSION_NUM >= 100000) if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { partitionedTable = true; } -#endif /* keep the lock */ heap_close(rel, NoLock); @@ -78,12 +72,10 @@ PartitionedTableNoLock(Oid relationId) return false; } -#if (PG_VERSION_NUM >= 100000) if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { partitionedTable = true; } -#endif /* keep the lock */ heap_close(rel, NoLock); @@ -101,9 +93,7 @@ PartitionTable(Oid relationId) Relation rel = heap_open(relationId, AccessShareLock); bool partitionTable = false; -#if (PG_VERSION_NUM >= 100000) partitionTable = rel->rd_rel->relispartition; -#endif /* keep the lock */ heap_close(rel, NoLock); @@ -129,9 +119,7 @@ PartitionTableNoLock(Oid relationId) return false; } -#if (PG_VERSION_NUM >= 100000) partitionTable = rel->rd_rel->relispartition; -#endif /* keep the lock */ heap_close(rel, NoLock); @@ -237,9 +225,7 @@ PartitionParentOid(Oid partitionOid) { Oid partitionParentOid = InvalidOid; -#if (PG_VERSION_NUM >= 100000) partitionParentOid = get_partition_parent(partitionOid); -#endif return partitionParentOid; } @@ -255,7 +241,6 @@ PartitionList(Oid parentRelationId) Relation rel = heap_open(parentRelationId, AccessShareLock); List *partitionList = NIL; -#if (PG_VERSION_NUM >= 100000) int partitionIndex = 0; int partitionCount = 0; @@ -274,7 +259,6 @@ PartitionList(Oid parentRelationId) partitionList = lappend_oid(partitionList, rel->rd_partdesc->oids[partitionIndex]); } -#endif /* keep the lock */ heap_close(rel, NoLock); @@ -291,8 +275,6 @@ char * GenerateDetachPartitionCommand(Oid partitionTableId) { StringInfo detachPartitionCommand = makeStringInfo(); - -#if (PG_VERSION_NUM >= 100000) Oid parentId = InvalidOid; char *tableQualifiedName = NULL; char *parentTableQualifiedName = NULL; @@ -311,7 +293,6 @@ GenerateDetachPartitionCommand(Oid partitionTableId) appendStringInfo(detachPartitionCommand, "ALTER TABLE IF EXISTS 
%s DETACH PARTITION %s;", parentTableQualifiedName, tableQualifiedName); -#endif return detachPartitionCommand->data; } @@ -325,8 +306,6 @@ char * GeneratePartitioningInformation(Oid parentTableId) { char *partitionBoundCString = ""; - -#if (PG_VERSION_NUM >= 100000) Datum partitionBoundDatum = 0; if (!PartitionedTable(parentTableId)) @@ -340,7 +319,6 @@ GeneratePartitioningInformation(Oid parentTableId) ObjectIdGetDatum(parentTableId)); partitionBoundCString = TextDatumGetCString(partitionBoundDatum); -#endif return partitionBoundCString; } @@ -398,8 +376,6 @@ char * GenerateAlterTableAttachPartitionCommand(Oid partitionTableId) { StringInfo createPartitionCommand = makeStringInfo(); - -#if (PG_VERSION_NUM >= 100000) char *partitionBoundCString = NULL; Oid parentId = InvalidOid; @@ -422,14 +398,11 @@ GenerateAlterTableAttachPartitionCommand(Oid partitionTableId) appendStringInfo(createPartitionCommand, "ALTER TABLE %s ATTACH PARTITION %s %s;", parentTableQualifiedName, tableQualifiedName, partitionBoundCString); -#endif return createPartitionCommand->data; } -#if (PG_VERSION_NUM >= 100000) - /* * This function heaviliy inspired from RelationBuildPartitionDesc() * which is avaliable in src/backend/catalog/partition.c. 
@@ -479,6 +452,3 @@ PartitionBound(Oid partitionId) return partitionBoundString; } - - -#endif diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index 6a0e1f2e1..36989d0f6 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -38,9 +38,7 @@ #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/lsyscache.h" -#if (PG_VERSION_NUM >= 100000) #include "utils/varlena.h" -#endif /* static definition and declarations */ diff --git a/src/backend/distributed/utils/ruleutils_96.c b/src/backend/distributed/utils/ruleutils_96.c deleted file mode 100644 index 3d5bb4cfb..000000000 --- a/src/backend/distributed/utils/ruleutils_96.c +++ /dev/null @@ -1,7582 +0,0 @@ -/*------------------------------------------------------------------------- - * - * ruleutils_96.c - * Additional, non core exposed, functions to convert stored - * expressions/querytrees back to source text - * - * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/distributed/utils/ruleutils_96.c - * - * This needs to be closely in sync with the core code. 
- *------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#if (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 90700) - -#include -#include -#include - -#include "access/amapi.h" -#include "access/htup_details.h" -#include "access/sysattr.h" -#include "catalog/dependency.h" -#include "catalog/indexing.h" -#include "catalog/pg_aggregate.h" -#include "catalog/pg_am.h" -#include "catalog/pg_authid.h" -#include "catalog/pg_collation.h" -#include "catalog/pg_constraint.h" -#include "catalog/pg_depend.h" -#include "catalog/pg_extension.h" -#include "catalog/pg_foreign_data_wrapper.h" -#include "catalog/pg_language.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_operator.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_type.h" -#include "commands/defrem.h" -#include "commands/extension.h" -#include "commands/tablespace.h" -#include "common/keywords.h" -#include "distributed/citus_nodefuncs.h" -#include "distributed/citus_ruleutils.h" -#include "executor/spi.h" -#include "foreign/foreign.h" -#include "funcapi.h" -#include "mb/pg_wchar.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "optimizer/tlist.h" -#include "parser/parse_agg.h" -#include "parser/parse_func.h" -#include "parser/parse_node.h" -#include "parser/parse_oper.h" -#include "parser/parser.h" -#include "parser/parsetree.h" -#include "rewrite/rewriteHandler.h" -#include "rewrite/rewriteManip.h" -#include "rewrite/rewriteSupport.h" -#include "utils/array.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/hsearch.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/ruleutils.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" -#include "utils/tqual.h" -#include "utils/typcache.h" -#include "utils/xml.h" - - -/* ---------- - * Pretty formatting constants - * ---------- - */ - -/* Indent counts */ -#define 
PRETTYINDENT_STD 8 -#define PRETTYINDENT_JOIN 4 -#define PRETTYINDENT_VAR 4 - -#define PRETTYINDENT_LIMIT 40 /* wrap limit */ - -/* Pretty flags */ -#define PRETTYFLAG_PAREN 1 -#define PRETTYFLAG_INDENT 2 - -/* Default line length for pretty-print wrapping: 0 means wrap always */ -#define WRAP_COLUMN_DEFAULT 0 - -/* macro to test if pretty action needed */ -#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN) -#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT) - - -/* ---------- - * Local data types - * ---------- - */ - -/* Context info needed for invoking a recursive querytree display routine */ -typedef struct -{ - StringInfo buf; /* output buffer to append to */ - List *namespaces; /* List of deparse_namespace nodes */ - List *windowClause; /* Current query level's WINDOW clause */ - List *windowTList; /* targetlist for resolving WINDOW clause */ - int prettyFlags; /* enabling of pretty-print functions */ - int wrapColumn; /* max line length, or -1 for no limit */ - int indentLevel; /* current indent level for prettyprint */ - bool varprefix; /* TRUE to print prefixes on Vars */ - Oid distrelid; /* the distributed table being modified, if valid */ - int64 shardid; /* a distributed table's shardid, if positive */ - ParseExprKind special_exprkind; /* set only for exprkinds needing - * special handling */ -} deparse_context; - -/* - * Each level of query context around a subtree needs a level of Var namespace. - * A Var having varlevelsup=N refers to the N'th item (counting from 0) in - * the current context's namespaces list. - * - * The rangetable is the list of actual RTEs from the query tree, and the - * cte list is the list of actual CTEs. - * - * rtable_names holds the alias name to be used for each RTE (either a C - * string, or NULL for nameless RTEs such as unnamed joins). - * rtable_columns holds the column alias names to be used for each RTE. 
- * - * In some cases we need to make names of merged JOIN USING columns unique - * across the whole query, not only per-RTE. If so, unique_using is TRUE - * and using_names is a list of C strings representing names already assigned - * to USING columns. - * - * When deparsing plan trees, there is always just a single item in the - * deparse_namespace list (since a plan tree never contains Vars with - * varlevelsup > 0). We store the PlanState node that is the immediate - * parent of the expression to be deparsed, as well as a list of that - * PlanState's ancestors. In addition, we store its outer and inner subplan - * state nodes, as well as their plan nodes' targetlists, and the index tlist - * if the current plan node might contain INDEX_VAR Vars. (These fields could - * be derived on-the-fly from the current PlanState, but it seems notationally - * clearer to set them up as separate fields.) - */ -typedef struct -{ - List *rtable; /* List of RangeTblEntry nodes */ - List *rtable_names; /* Parallel list of names for RTEs */ - List *rtable_columns; /* Parallel list of deparse_columns structs */ - List *ctes; /* List of CommonTableExpr nodes */ - /* Workspace for column alias assignment: */ - bool unique_using; /* Are we making USING names globally unique */ - List *using_names; /* List of assigned names for USING columns */ - /* Remaining fields are used only when deparsing a Plan tree: */ - PlanState *planstate; /* immediate parent of current expression */ - List *ancestors; /* ancestors of planstate */ - PlanState *outer_planstate; /* outer subplan state, or NULL if none */ - PlanState *inner_planstate; /* inner subplan state, or NULL if none */ - List *outer_tlist; /* referent for OUTER_VAR Vars */ - List *inner_tlist; /* referent for INNER_VAR Vars */ - List *index_tlist; /* referent for INDEX_VAR Vars */ -} deparse_namespace; - -/* - * Per-relation data about column alias names. 
- * - * Selecting aliases is unreasonably complicated because of the need to dump - * rules/views whose underlying tables may have had columns added, deleted, or - * renamed since the query was parsed. We must nonetheless print the rule/view - * in a form that can be reloaded and will produce the same results as before. - * - * For each RTE used in the query, we must assign column aliases that are - * unique within that RTE. SQL does not require this of the original query, - * but due to factors such as *-expansion we need to be able to uniquely - * reference every column in a decompiled query. As long as we qualify all - * column references, per-RTE uniqueness is sufficient for that. - * - * However, we can't ensure per-column name uniqueness for unnamed join RTEs, - * since they just inherit column names from their input RTEs, and we can't - * rename the columns at the join level. Most of the time this isn't an issue - * because we don't need to reference the join's output columns as such; we - * can reference the input columns instead. That approach can fail for merged - * JOIN USING columns, however, so when we have one of those in an unnamed - * join, we have to make that column's alias globally unique across the whole - * query to ensure it can be referenced unambiguously. - * - * Another problem is that a JOIN USING clause requires the columns to be - * merged to have the same aliases in both input RTEs, and that no other - * columns in those RTEs or their children conflict with the USING names. - * To handle that, we do USING-column alias assignment in a recursive - * traversal of the query's jointree. When descending through a JOIN with - * USING, we preassign the USING column names to the child columns, overriding - * other rules for column alias assignment. We also mark each RTE with a list - * of all USING column names selected for joins containing that RTE, so that - * when we assign other columns' aliases later, we can avoid conflicts. 
- * - * Another problem is that if a JOIN's input tables have had columns added or - * deleted since the query was parsed, we must generate a column alias list - * for the join that matches the current set of input columns --- otherwise, a - * change in the number of columns in the left input would throw off matching - * of aliases to columns of the right input. Thus, positions in the printable - * column alias list are not necessarily one-for-one with varattnos of the - * JOIN, so we need a separate new_colnames[] array for printing purposes. - */ -typedef struct -{ - /* - * colnames is an array containing column aliases to use for columns that - * existed when the query was parsed. Dropped columns have NULL entries. - * This array can be directly indexed by varattno to get a Var's name. - * - * Non-NULL entries are guaranteed unique within the RTE, *except* when - * this is for an unnamed JOIN RTE. In that case we merely copy up names - * from the two input RTEs. - * - * During the recursive descent in set_using_names(), forcible assignment - * of a child RTE's column name is represented by pre-setting that element - * of the child's colnames array. So at that stage, NULL entries in this - * array just mean that no name has been preassigned, not necessarily that - * the column is dropped. - */ - int num_cols; /* length of colnames[] array */ - char **colnames; /* array of C strings and NULLs */ - - /* - * new_colnames is an array containing column aliases to use for columns - * that would exist if the query was re-parsed against the current - * definitions of its base tables. This is what to print as the column - * alias list for the RTE. This array does not include dropped columns, - * but it will include columns added since original parsing. Indexes in - * it therefore have little to do with current varattno values. As above, - * entries are unique unless this is for an unnamed JOIN RTE. 
(In such an - * RTE, we never actually print this array, but we must compute it anyway - * for possible use in computing column names of upper joins.) The - * parallel array is_new_col marks which of these columns are new since - * original parsing. Entries with is_new_col false must match the - * non-NULL colnames entries one-for-one. - */ - int num_new_cols; /* length of new_colnames[] array */ - char **new_colnames; /* array of C strings */ - bool *is_new_col; /* array of bool flags */ - - /* This flag tells whether we should actually print a column alias list */ - bool printaliases; - - /* This list has all names used as USING names in joins above this RTE */ - List *parentUsing; /* names assigned to parent merged columns */ - - /* - * If this struct is for a JOIN RTE, we fill these fields during the - * set_using_names() pass to describe its relationship to its child RTEs. - * - * leftattnos and rightattnos are arrays with one entry per existing - * output column of the join (hence, indexable by join varattno). For a - * simple reference to a column of the left child, leftattnos[i] is the - * child RTE's attno and rightattnos[i] is zero; and conversely for a - * column of the right child. But for merged columns produced by JOIN - * USING/NATURAL JOIN, both leftattnos[i] and rightattnos[i] are nonzero. - * Also, if the column has been dropped, both are zero. - * - * If it's a JOIN USING, usingNames holds the alias names selected for the - * merged columns (these might be different from the original USING list, - * if we had to modify names to achieve uniqueness). 
- */ - int leftrti; /* rangetable index of left child */ - int rightrti; /* rangetable index of right child */ - int *leftattnos; /* left-child varattnos of join cols, or 0 */ - int *rightattnos; /* right-child varattnos of join cols, or 0 */ - List *usingNames; /* names assigned to merged columns */ -} deparse_columns; - -/* This macro is analogous to rt_fetch(), but for deparse_columns structs */ -#define deparse_columns_fetch(rangetable_index, dpns) \ - ((deparse_columns *) list_nth((dpns)->rtable_columns, (rangetable_index)-1)) - -/* - * Entry in set_rtable_names' hash table - */ -typedef struct -{ - char name[NAMEDATALEN]; /* Hash key --- must be first */ - int counter; /* Largest addition used so far for name */ -} NameHashEntry; - - -/* ---------- - * Local functions - * - * Most of these functions used to use fixed-size buffers to build their - * results. Now, they take an (already initialized) StringInfo object - * as a parameter, and append their text output to its contents. - * ---------- - */ -static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, - Bitmapset *rels_used); -static void set_deparse_for_query(deparse_namespace *dpns, Query *query, - List *parent_namespaces); -static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode); -static void set_using_names(deparse_namespace *dpns, Node *jtnode, - List *parentUsing); -static void set_relation_column_names(deparse_namespace *dpns, - RangeTblEntry *rte, - deparse_columns *colinfo); -static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo); -static bool colname_is_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo); -static char *make_colname_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo); -static void expand_colnames_array_to(deparse_columns *colinfo, int n); -static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, - deparse_columns *colinfo); -static 
void flatten_join_using_qual(Node *qual, - List **leftvars, List **rightvars); -static char *get_rtable_name(int rtindex, deparse_context *context); -static void set_deparse_planstate(deparse_namespace *dpns, PlanState *ps); -static void push_child_plan(deparse_namespace *dpns, PlanState *ps, - deparse_namespace *save_dpns); -static void pop_child_plan(deparse_namespace *dpns, - deparse_namespace *save_dpns); -static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, - deparse_namespace *save_dpns); -static void pop_ancestor_plan(deparse_namespace *dpns, - deparse_namespace *save_dpns); -static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, - TupleDesc resultDesc, - int prettyFlags, int wrapColumn, int startIndent); -static void get_query_def_extended(Query *query, StringInfo buf, - List *parentnamespace, Oid distrelid, int64 shardid, - TupleDesc resultDesc, int prettyFlags, int wrapColumn, - int startIndent); -static void get_values_def(List *values_lists, deparse_context *context); -static void get_with_clause(Query *query, deparse_context *context); -static void get_select_query_def(Query *query, deparse_context *context, - TupleDesc resultDesc); -static void get_insert_query_def(Query *query, deparse_context *context); -static void get_update_query_def(Query *query, deparse_context *context); -static void get_update_query_targetlist_def(Query *query, List *targetList, - deparse_context *context, - RangeTblEntry *rte); -static void get_delete_query_def(Query *query, deparse_context *context); -static void get_utility_query_def(Query *query, deparse_context *context); -static void get_basic_select_query(Query *query, deparse_context *context, - TupleDesc resultDesc); -static void get_target_list(List *targetList, deparse_context *context, - TupleDesc resultDesc); -static void get_setop_query(Node *setOp, Query *query, - deparse_context *context, - TupleDesc resultDesc); -static Node *get_rule_sortgroupclause(Index 
ref, List *tlist, - bool force_colno, - deparse_context *context); -static void get_rule_groupingset(GroupingSet *gset, List *targetlist, - bool omit_parens, deparse_context *context); -static void get_rule_orderby(List *orderList, List *targetList, - bool force_colno, deparse_context *context); -static void get_rule_windowclause(Query *query, deparse_context *context); -static void get_rule_windowspec(WindowClause *wc, List *targetList, - deparse_context *context); -static char *get_variable(Var *var, int levelsup, bool istoplevel, - deparse_context *context); -static void get_special_variable(Node *node, deparse_context *context, - void *private); -static void resolve_special_varno(Node *node, deparse_context *context, - void *private, - void (*callback) (Node *, deparse_context *, void *)); -static Node *find_param_referent(Param *param, deparse_context *context, - deparse_namespace **dpns_p, ListCell **ancestor_cell_p); -static void get_parameter(Param *param, deparse_context *context); -static const char *get_simple_binary_op_name(OpExpr *expr); -static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags); -static void appendContextKeyword(deparse_context *context, const char *str, - int indentBefore, int indentAfter, int indentPlus); -static void removeStringInfoSpaces(StringInfo str); -static void get_rule_expr(Node *node, deparse_context *context, - bool showimplicit); -static void get_rule_expr_toplevel(Node *node, deparse_context *context, - bool showimplicit); -static void get_rule_expr_funccall(Node *node, deparse_context *context, - bool showimplicit); -static bool looks_like_function(Node *node); -static void get_oper_expr(OpExpr *expr, deparse_context *context); -static void get_func_expr(FuncExpr *expr, deparse_context *context, - bool showimplicit); -static void get_agg_expr(Aggref *aggref, deparse_context *context, - Aggref *original_aggref); -static void get_agg_combine_expr(Node *node, deparse_context *context, - void *private); 
-static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context); -static void get_coercion_expr(Node *arg, deparse_context *context, - Oid resulttype, int32 resulttypmod, - Node *parentNode); -static void get_const_expr(Const *constval, deparse_context *context, - int showtype); -static void get_const_collation(Const *constval, deparse_context *context); -static void simple_quote_literal(StringInfo buf, const char *val); -static void get_sublink_expr(SubLink *sublink, deparse_context *context); -static void get_from_clause(Query *query, const char *prefix, - deparse_context *context); -static void get_from_clause_item(Node *jtnode, Query *query, - deparse_context *context); -static void get_column_alias_list(deparse_columns *colinfo, - deparse_context *context); -static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, - deparse_columns *colinfo, - deparse_context *context); -static void get_tablesample_def(TableSampleClause *tablesample, - deparse_context *context); -static void get_opclass_name(Oid opclass, Oid actual_datatype, - StringInfo buf); -static Node *processIndirection(Node *node, deparse_context *context); -static void printSubscripts(ArrayRef *aref, deparse_context *context); -static char *get_relation_name(Oid relid); -static char *generate_relation_or_shard_name(Oid relid, Oid distrelid, - int64 shardid, List *namespaces); -static char *generate_fragment_name(char *schemaName, char *tableName); -static char *generate_function_name(Oid funcid, int nargs, - List *argnames, Oid *argtypes, - bool has_variadic, bool *use_variadic_p, - ParseExprKind special_exprkind); -static char *generate_operator_name(Oid operid, Oid arg1, Oid arg2); - -#define only_marker(rte) ((rte)->inh ? "" : "ONLY ") - - - -/* - * pg_get_query_def parses back one query tree, and outputs the resulting query - * string into given buffer. 
- */ -void -pg_get_query_def(Query *query, StringInfo buffer) -{ - get_query_def(query, buffer, NIL, NULL, 0, WRAP_COLUMN_DEFAULT, 0); -} - - -/* - * set_rtable_names: select RTE aliases to be used in printing a query - * - * We fill in dpns->rtable_names with a list of names that is one-for-one with - * the already-filled dpns->rtable list. Each RTE name is unique among those - * in the new namespace plus any ancestor namespaces listed in - * parent_namespaces. - * - * If rels_used isn't NULL, only RTE indexes listed in it are given aliases. - * - * Note that this function is only concerned with relation names, not column - * names. - */ -static void -set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, - Bitmapset *rels_used) -{ - HASHCTL hash_ctl; - HTAB *names_hash; - NameHashEntry *hentry; - bool found; - int rtindex; - ListCell *lc; - - dpns->rtable_names = NIL; - /* nothing more to do if empty rtable */ - if (dpns->rtable == NIL) - return; - - /* - * We use a hash table to hold known names, so that this process is O(N) - * not O(N^2) for N names. 
- */ - MemSet(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = NAMEDATALEN; - hash_ctl.entrysize = sizeof(NameHashEntry); - hash_ctl.hcxt = CurrentMemoryContext; - names_hash = hash_create("set_rtable_names names", - list_length(dpns->rtable), - &hash_ctl, - HASH_ELEM | HASH_CONTEXT); - /* Preload the hash table with names appearing in parent_namespaces */ - foreach(lc, parent_namespaces) - { - deparse_namespace *olddpns = (deparse_namespace *) lfirst(lc); - ListCell *lc2; - - foreach(lc2, olddpns->rtable_names) - { - char *oldname = (char *) lfirst(lc2); - - if (oldname == NULL) - continue; - hentry = (NameHashEntry *) hash_search(names_hash, - oldname, - HASH_ENTER, - &found); - /* we do not complain about duplicate names in parent namespaces */ - hentry->counter = 0; - } - } - - /* Now we can scan the rtable */ - rtindex = 1; - foreach(lc, dpns->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - char *refname; - - /* Just in case this takes an unreasonable amount of time ... */ - CHECK_FOR_INTERRUPTS(); - - if (rels_used && !bms_is_member(rtindex, rels_used)) - { - /* Ignore unreferenced RTE */ - refname = NULL; - } - else if (rte->alias) - { - /* If RTE has a user-defined alias, prefer that */ - refname = rte->alias->aliasname; - } - else if (rte->rtekind == RTE_RELATION) - { - /* Use the current actual name of the relation */ - refname = get_rel_name(rte->relid); - } - else if (rte->rtekind == RTE_JOIN) - { - /* Unnamed join has no refname */ - refname = NULL; - } - else - { - /* Otherwise use whatever the parser assigned */ - refname = rte->eref->aliasname; - } - - /* - * If the selected name isn't unique, append digits to make it so, and - * make a new hash entry for it once we've got a unique name. For a - * very long input name, we might have to truncate to stay within - * NAMEDATALEN. 
- */ - if (refname) - { - hentry = (NameHashEntry *) hash_search(names_hash, - refname, - HASH_ENTER, - &found); - if (found) - { - /* Name already in use, must choose a new one */ - int refnamelen = strlen(refname); - char *modname = (char *) palloc(refnamelen + 16); - NameHashEntry *hentry2; - - do - { - hentry->counter++; - for (;;) - { - /* - * We avoid using %.*s here because it can misbehave - * if the data is not valid in what libc thinks is the - * prevailing encoding. - */ - memcpy(modname, refname, refnamelen); - sprintf(modname + refnamelen, "_%d", hentry->counter); - if (strlen(modname) < NAMEDATALEN) - break; - /* drop chars from refname to keep all the digits */ - refnamelen = pg_mbcliplen(refname, refnamelen, - refnamelen - 1); - } - hentry2 = (NameHashEntry *) hash_search(names_hash, - modname, - HASH_ENTER, - &found); - } while (found); - hentry2->counter = 0; /* init new hash entry */ - refname = modname; - } - else - { - /* Name not previously used, need only initialize hentry */ - hentry->counter = 0; - } - } - - dpns->rtable_names = lappend(dpns->rtable_names, refname); - rtindex++; - } - - hash_destroy(names_hash); -} - -/* - * set_deparse_for_query: set up deparse_namespace for deparsing a Query tree - * - * For convenience, this is defined to initialize the deparse_namespace struct - * from scratch. 
- */ -static void -set_deparse_for_query(deparse_namespace *dpns, Query *query, - List *parent_namespaces) -{ - ListCell *lc; - ListCell *lc2; - - /* Initialize *dpns and fill rtable/ctes links */ - memset(dpns, 0, sizeof(deparse_namespace)); - dpns->rtable = query->rtable; - dpns->ctes = query->cteList; - - /* Assign a unique relation alias to each RTE */ - set_rtable_names(dpns, parent_namespaces, NULL); - - /* Initialize dpns->rtable_columns to contain zeroed structs */ - dpns->rtable_columns = NIL; - while (list_length(dpns->rtable_columns) < list_length(dpns->rtable)) - dpns->rtable_columns = lappend(dpns->rtable_columns, - palloc0(sizeof(deparse_columns))); - - /* If it's a utility query, it won't have a jointree */ - if (query->jointree) - { - /* Detect whether global uniqueness of USING names is needed */ - dpns->unique_using = - has_dangerous_join_using(dpns, (Node *) query->jointree); - - /* - * Select names for columns merged by USING, via a recursive pass over - * the query jointree. - */ - set_using_names(dpns, (Node *) query->jointree, NIL); - } - - /* - * Now assign remaining column aliases for each RTE. We do this in a - * linear scan of the rtable, so as to process RTEs whether or not they - * are in the jointree (we mustn't miss NEW.*, INSERT target relations, - * etc). JOIN RTEs must be processed after their children, but this is - * okay because they appear later in the rtable list than their children - * (cf Asserts in identify_join_columns()). 
- */ - forboth(lc, dpns->rtable, lc2, dpns->rtable_columns) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - deparse_columns *colinfo = (deparse_columns *) lfirst(lc2); - - if (rte->rtekind == RTE_JOIN) - set_join_column_names(dpns, rte, colinfo); - else - set_relation_column_names(dpns, rte, colinfo); - } -} - -/* - * has_dangerous_join_using: search jointree for unnamed JOIN USING - * - * Merged columns of a JOIN USING may act differently from either of the input - * columns, either because they are merged with COALESCE (in a FULL JOIN) or - * because an implicit coercion of the underlying input column is required. - * In such a case the column must be referenced as a column of the JOIN not as - * a column of either input. And this is problematic if the join is unnamed - * (alias-less): we cannot qualify the column's name with an RTE name, since - * there is none. (Forcibly assigning an alias to the join is not a solution, - * since that will prevent legal references to tables below the join.) - * To ensure that every column in the query is unambiguously referenceable, - * we must assign such merged columns names that are globally unique across - * the whole query, aliasing other columns out of the way as necessary. - * - * Because the ensuing re-aliasing is fairly damaging to the readability of - * the query, we don't do this unless we have to. So, we must pre-scan - * the join tree to see if we have to, before starting set_using_names(). - */ -static bool -has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode) -{ - if (IsA(jtnode, RangeTblRef)) - { - /* nothing to do here */ - } - else if (IsA(jtnode, FromExpr)) - { - FromExpr *f = (FromExpr *) jtnode; - ListCell *lc; - - foreach(lc, f->fromlist) - { - if (has_dangerous_join_using(dpns, (Node *) lfirst(lc))) - return true; - } - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - - /* Is it an unnamed JOIN with USING? 
*/ - if (j->alias == NULL && j->usingClause) - { - /* - * Yes, so check each join alias var to see if any of them are not - * simple references to underlying columns. If so, we have a - * dangerous situation and must pick unique aliases. - */ - RangeTblEntry *jrte = rt_fetch(j->rtindex, dpns->rtable); - ListCell *lc; - - foreach(lc, jrte->joinaliasvars) - { - Var *aliasvar = (Var *) lfirst(lc); - - if (aliasvar != NULL && !IsA(aliasvar, Var)) - return true; - } - } - - /* Nope, but inspect children */ - if (has_dangerous_join_using(dpns, j->larg)) - return true; - if (has_dangerous_join_using(dpns, j->rarg)) - return true; - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); - return false; -} - -/* - * set_using_names: select column aliases to be used for merged USING columns - * - * We do this during a recursive descent of the query jointree. - * dpns->unique_using must already be set to determine the global strategy. - * - * Column alias info is saved in the dpns->rtable_columns list, which is - * assumed to be filled with pre-zeroed deparse_columns structs. - * - * parentUsing is a list of all USING aliases assigned in parent joins of - * the current jointree node. (The passed-in list must not be modified.) 
- */ -static void -set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing) -{ - if (IsA(jtnode, RangeTblRef)) - { - /* nothing to do now */ - } - else if (IsA(jtnode, FromExpr)) - { - FromExpr *f = (FromExpr *) jtnode; - ListCell *lc; - - foreach(lc, f->fromlist) - set_using_names(dpns, (Node *) lfirst(lc), parentUsing); - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - RangeTblEntry *rte = rt_fetch(j->rtindex, dpns->rtable); - deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); - int *leftattnos; - int *rightattnos; - deparse_columns *leftcolinfo; - deparse_columns *rightcolinfo; - int i; - ListCell *lc; - - /* Get info about the shape of the join */ - identify_join_columns(j, rte, colinfo); - leftattnos = colinfo->leftattnos; - rightattnos = colinfo->rightattnos; - - /* Look up the not-yet-filled-in child deparse_columns structs */ - leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); - rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); - - /* - * If this join is unnamed, then we cannot substitute new aliases at - * this level, so any name requirements pushed down to here must be - * pushed down again to the children. - */ - if (rte->alias == NULL) - { - for (i = 0; i < colinfo->num_cols; i++) - { - char *colname = colinfo->colnames[i]; - - if (colname == NULL) - continue; - - /* Push down to left column, unless it's a system column */ - if (leftattnos[i] > 0) - { - expand_colnames_array_to(leftcolinfo, leftattnos[i]); - leftcolinfo->colnames[leftattnos[i] - 1] = colname; - } - - /* Same on the righthand side */ - if (rightattnos[i] > 0) - { - expand_colnames_array_to(rightcolinfo, rightattnos[i]); - rightcolinfo->colnames[rightattnos[i] - 1] = colname; - } - } - } - - /* - * If there's a USING clause, select the USING column names and push - * those names down to the children. 
We have two strategies: - * - * If dpns->unique_using is TRUE, we force all USING names to be - * unique across the whole query level. In principle we'd only need - * the names of dangerous USING columns to be globally unique, but to - * safely assign all USING names in a single pass, we have to enforce - * the same uniqueness rule for all of them. However, if a USING - * column's name has been pushed down from the parent, we should use - * it as-is rather than making a uniqueness adjustment. This is - * necessary when we're at an unnamed join, and it creates no risk of - * ambiguity. Also, if there's a user-written output alias for a - * merged column, we prefer to use that rather than the input name; - * this simplifies the logic and seems likely to lead to less aliasing - * overall. - * - * If dpns->unique_using is FALSE, we only need USING names to be - * unique within their own join RTE. We still need to honor - * pushed-down names, though. - * - * Though significantly different in results, these two strategies are - * implemented by the same code, with only the difference of whether - * to put assigned names into dpns->using_names. 
- */ - if (j->usingClause) - { - /* Copy the input parentUsing list so we don't modify it */ - parentUsing = list_copy(parentUsing); - - /* USING names must correspond to the first join output columns */ - expand_colnames_array_to(colinfo, list_length(j->usingClause)); - i = 0; - foreach(lc, j->usingClause) - { - char *colname = strVal(lfirst(lc)); - - /* Assert it's a merged column */ - Assert(leftattnos[i] != 0 && rightattnos[i] != 0); - - /* Adopt passed-down name if any, else select unique name */ - if (colinfo->colnames[i] != NULL) - colname = colinfo->colnames[i]; - else - { - /* Prefer user-written output alias if any */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - /* Make it appropriately unique */ - colname = make_colname_unique(colname, dpns, colinfo); - if (dpns->unique_using) - dpns->using_names = lappend(dpns->using_names, - colname); - /* Save it as output column name, too */ - colinfo->colnames[i] = colname; - } - - /* Remember selected names for use later */ - colinfo->usingNames = lappend(colinfo->usingNames, colname); - parentUsing = lappend(parentUsing, colname); - - /* Push down to left column, unless it's a system column */ - if (leftattnos[i] > 0) - { - expand_colnames_array_to(leftcolinfo, leftattnos[i]); - leftcolinfo->colnames[leftattnos[i] - 1] = colname; - } - - /* Same on the righthand side */ - if (rightattnos[i] > 0) - { - expand_colnames_array_to(rightcolinfo, rightattnos[i]); - rightcolinfo->colnames[rightattnos[i] - 1] = colname; - } - - i++; - } - } - - /* Mark child deparse_columns structs with correct parentUsing info */ - leftcolinfo->parentUsing = parentUsing; - rightcolinfo->parentUsing = parentUsing; - - /* Now recursively assign USING column names in children */ - set_using_names(dpns, j->larg, parentUsing); - set_using_names(dpns, j->rarg, parentUsing); - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); -} - -/* - * 
set_relation_column_names: select column aliases for a non-join RTE - * - * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. - * If any colnames entries are already filled in, those override local - * choices. - */ -static void -set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo) -{ - int ncolumns; - char **real_colnames; - bool changed_any; - int noldcolumns; - int i; - int j; - - /* - * Extract the RTE's "real" column names. This is comparable to - * get_rte_attribute_name, except that it's important to disregard dropped - * columns. We put NULL into the array for a dropped column. - */ - if (rte->rtekind == RTE_RELATION) - { - /* Relation --- look to the system catalogs for up-to-date info */ - Relation rel; - TupleDesc tupdesc; - - rel = relation_open(rte->relid, AccessShareLock); - tupdesc = RelationGetDescr(rel); - - ncolumns = tupdesc->natts; - real_colnames = (char **) palloc(ncolumns * sizeof(char *)); - - for (i = 0; i < ncolumns; i++) - { - if (tupdesc->attrs[i]->attisdropped) - real_colnames[i] = NULL; - else - real_colnames[i] = pstrdup(NameStr(tupdesc->attrs[i]->attname)); - } - relation_close(rel, AccessShareLock); - } - else - { - /* Otherwise use the column names from eref */ - ListCell *lc; - - ncolumns = list_length(rte->eref->colnames); - real_colnames = (char **) palloc(ncolumns * sizeof(char *)); - - i = 0; - foreach(lc, rte->eref->colnames) - { - /* - * If the column name shown in eref is an empty string, then it's - * a column that was dropped at the time of parsing the query, so - * treat it as dropped. - */ - char *cname = strVal(lfirst(lc)); - - if (cname[0] == '\0') - cname = NULL; - real_colnames[i] = cname; - i++; - } - } - - /* - * Ensure colinfo->colnames has a slot for each column. (It could be long - * enough already, if we pushed down a name for the last column.) 
Note: - * it's possible that there are now more columns than there were when the - * query was parsed, ie colnames could be longer than rte->eref->colnames. - * We must assign unique aliases to the new columns too, else there could - * be unresolved conflicts when the view/rule is reloaded. - */ - expand_colnames_array_to(colinfo, ncolumns); - Assert(colinfo->num_cols == ncolumns); - - /* - * Make sufficiently large new_colnames and is_new_col arrays, too. - * - * Note: because we leave colinfo->num_new_cols zero until after the loop, - * colname_is_unique will not consult that array, which is fine because it - * would only be duplicate effort. - */ - colinfo->new_colnames = (char **) palloc(ncolumns * sizeof(char *)); - colinfo->is_new_col = (bool *) palloc(ncolumns * sizeof(bool)); - - /* - * Scan the columns, select a unique alias for each one, and store it in - * colinfo->colnames and colinfo->new_colnames. The former array has NULL - * entries for dropped columns, the latter omits them. Also mark - * new_colnames entries as to whether they are new since parse time; this - * is the case for entries beyond the length of rte->eref->colnames. 
- */ - noldcolumns = list_length(rte->eref->colnames); - changed_any = false; - j = 0; - for (i = 0; i < ncolumns; i++) - { - char *real_colname = real_colnames[i]; - char *colname = colinfo->colnames[i]; - - /* Skip dropped columns */ - if (real_colname == NULL) - { - Assert(colname == NULL); /* colnames[i] is already NULL */ - continue; - } - - /* If alias already assigned, that's what to use */ - if (colname == NULL) - { - /* If user wrote an alias, prefer that over real column name */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - else - colname = real_colname; - - /* Unique-ify and insert into colinfo */ - colname = make_colname_unique(colname, dpns, colinfo); - - colinfo->colnames[i] = colname; - } - - /* Put names of non-dropped columns in new_colnames[] too */ - colinfo->new_colnames[j] = colname; - /* And mark them as new or not */ - colinfo->is_new_col[j] = (i >= noldcolumns); - j++; - - /* Remember if any assigned aliases differ from "real" name */ - if (!changed_any && strcmp(colname, real_colname) != 0) - changed_any = true; - } - - /* - * Set correct length for new_colnames[] array. (Note: if columns have - * been added, colinfo->num_cols includes them, which is not really quite - * right but is harmless, since any new columns must be at the end where - * they won't affect varattnos of pre-existing columns.) - */ - colinfo->num_new_cols = j; - - /* - * For a relation RTE, we need only print the alias column names if any - * are different from the underlying "real" names. For a function RTE, - * always emit a complete column alias list; this is to protect against - * possible instability of the default column names (eg, from altering - * parameter names). For other RTE types, print if we changed anything OR - * if there were user-written column aliases (since the latter would be - * part of the underlying "reality"). 
- */ - if (rte->rtekind == RTE_RELATION) - colinfo->printaliases = changed_any; - else if (rte->rtekind == RTE_FUNCTION) - colinfo->printaliases = true; - else if (rte->alias && rte->alias->colnames != NIL) - colinfo->printaliases = true; - else - colinfo->printaliases = changed_any; -} - -/* - * set_join_column_names: select column aliases for a join RTE - * - * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. - * If any colnames entries are already filled in, those override local - * choices. Also, names for USING columns were already chosen by - * set_using_names(). We further expect that column alias selection has been - * completed for both input RTEs. - */ -static void -set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo) -{ - deparse_columns *leftcolinfo; - deparse_columns *rightcolinfo; - bool changed_any; - int noldcolumns; - int nnewcolumns; - Bitmapset *leftmerged = NULL; - Bitmapset *rightmerged = NULL; - int i; - int j; - int ic; - int jc; - - /* Look up the previously-filled-in child deparse_columns structs */ - leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); - rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); - - /* - * Ensure colinfo->colnames has a slot for each column. (It could be long - * enough already, if we pushed down a name for the last column.) Note: - * it's possible that one or both inputs now have more columns than there - * were when the query was parsed, but we'll deal with that below. We - * only need entries in colnames for pre-existing columns. - */ - noldcolumns = list_length(rte->eref->colnames); - expand_colnames_array_to(colinfo, noldcolumns); - Assert(colinfo->num_cols == noldcolumns); - - /* - * Scan the join output columns, select an alias for each one, and store - * it in colinfo->colnames. If there are USING columns, set_using_names() - * already selected their names, so we can start the loop at the first - * non-merged column. 
- */ - changed_any = false; - for (i = list_length(colinfo->usingNames); i < noldcolumns; i++) - { - char *colname = colinfo->colnames[i]; - char *real_colname; - - /* Ignore dropped column (only possible for non-merged column) */ - if (colinfo->leftattnos[i] == 0 && colinfo->rightattnos[i] == 0) - { - Assert(colname == NULL); - continue; - } - - /* Get the child column name */ - if (colinfo->leftattnos[i] > 0) - real_colname = leftcolinfo->colnames[colinfo->leftattnos[i] - 1]; - else if (colinfo->rightattnos[i] > 0) - real_colname = rightcolinfo->colnames[colinfo->rightattnos[i] - 1]; - else - { - /* We're joining system columns --- use eref name */ - real_colname = strVal(list_nth(rte->eref->colnames, i)); - } - Assert(real_colname != NULL); - - /* In an unnamed join, just report child column names as-is */ - if (rte->alias == NULL) - { - colinfo->colnames[i] = real_colname; - continue; - } - - /* If alias already assigned, that's what to use */ - if (colname == NULL) - { - /* If user wrote an alias, prefer that over real column name */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - else - colname = real_colname; - - /* Unique-ify and insert into colinfo */ - colname = make_colname_unique(colname, dpns, colinfo); - - colinfo->colnames[i] = colname; - } - - /* Remember if any assigned aliases differ from "real" name */ - if (!changed_any && strcmp(colname, real_colname) != 0) - changed_any = true; - } - - /* - * Calculate number of columns the join would have if it were re-parsed - * now, and create storage for the new_colnames and is_new_col arrays. - * - * Note: colname_is_unique will be consulting new_colnames[] during the - * loops below, so its not-yet-filled entries must be zeroes. 
- */ - nnewcolumns = leftcolinfo->num_new_cols + rightcolinfo->num_new_cols - - list_length(colinfo->usingNames); - colinfo->num_new_cols = nnewcolumns; - colinfo->new_colnames = (char **) palloc0(nnewcolumns * sizeof(char *)); - colinfo->is_new_col = (bool *) palloc0(nnewcolumns * sizeof(bool)); - - /* - * Generating the new_colnames array is a bit tricky since any new columns - * added since parse time must be inserted in the right places. This code - * must match the parser, which will order a join's columns as merged - * columns first (in USING-clause order), then non-merged columns from the - * left input (in attnum order), then non-merged columns from the right - * input (ditto). If one of the inputs is itself a join, its columns will - * be ordered according to the same rule, which means newly-added columns - * might not be at the end. We can figure out what's what by consulting - * the leftattnos and rightattnos arrays plus the input is_new_col arrays. - * - * In these loops, i indexes leftattnos/rightattnos (so it's join varattno - * less one), j indexes new_colnames/is_new_col, and ic/jc have similar - * meanings for the current child RTE. 
- */ - - /* Handle merged columns; they are first and can't be new */ - i = j = 0; - while (i < noldcolumns && - colinfo->leftattnos[i] != 0 && - colinfo->rightattnos[i] != 0) - { - /* column name is already determined and known unique */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - colinfo->is_new_col[j] = false; - - /* build bitmapsets of child attnums of merged columns */ - if (colinfo->leftattnos[i] > 0) - leftmerged = bms_add_member(leftmerged, colinfo->leftattnos[i]); - if (colinfo->rightattnos[i] > 0) - rightmerged = bms_add_member(rightmerged, colinfo->rightattnos[i]); - - i++, j++; - } - - /* Handle non-merged left-child columns */ - ic = 0; - for (jc = 0; jc < leftcolinfo->num_new_cols; jc++) - { - char *child_colname = leftcolinfo->new_colnames[jc]; - - if (!leftcolinfo->is_new_col[jc]) - { - /* Advance ic to next non-dropped old column of left child */ - while (ic < leftcolinfo->num_cols && - leftcolinfo->colnames[ic] == NULL) - ic++; - Assert(ic < leftcolinfo->num_cols); - ic++; - /* If it is a merged column, we already processed it */ - if (bms_is_member(ic, leftmerged)) - continue; - /* Else, advance i to the corresponding existing join column */ - while (i < colinfo->num_cols && - colinfo->colnames[i] == NULL) - i++; - Assert(i < colinfo->num_cols); - Assert(ic == colinfo->leftattnos[i]); - /* Use the already-assigned name of this column */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - i++; - } - else - { - /* - * Unique-ify the new child column name and assign, unless we're - * in an unnamed join, in which case just copy - */ - if (rte->alias != NULL) - { - colinfo->new_colnames[j] = - make_colname_unique(child_colname, dpns, colinfo); - if (!changed_any && - strcmp(colinfo->new_colnames[j], child_colname) != 0) - changed_any = true; - } - else - colinfo->new_colnames[j] = child_colname; - } - - colinfo->is_new_col[j] = leftcolinfo->is_new_col[jc]; - j++; - } - - /* Handle non-merged right-child columns in exactly the same way */ - 
ic = 0; - for (jc = 0; jc < rightcolinfo->num_new_cols; jc++) - { - char *child_colname = rightcolinfo->new_colnames[jc]; - - if (!rightcolinfo->is_new_col[jc]) - { - /* Advance ic to next non-dropped old column of right child */ - while (ic < rightcolinfo->num_cols && - rightcolinfo->colnames[ic] == NULL) - ic++; - Assert(ic < rightcolinfo->num_cols); - ic++; - /* If it is a merged column, we already processed it */ - if (bms_is_member(ic, rightmerged)) - continue; - /* Else, advance i to the corresponding existing join column */ - while (i < colinfo->num_cols && - colinfo->colnames[i] == NULL) - i++; - Assert(i < colinfo->num_cols); - Assert(ic == colinfo->rightattnos[i]); - /* Use the already-assigned name of this column */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - i++; - } - else - { - /* - * Unique-ify the new child column name and assign, unless we're - * in an unnamed join, in which case just copy - */ - if (rte->alias != NULL) - { - colinfo->new_colnames[j] = - make_colname_unique(child_colname, dpns, colinfo); - if (!changed_any && - strcmp(colinfo->new_colnames[j], child_colname) != 0) - changed_any = true; - } - else - colinfo->new_colnames[j] = child_colname; - } - - colinfo->is_new_col[j] = rightcolinfo->is_new_col[jc]; - j++; - } - - /* Assert we processed the right number of columns */ -#ifdef USE_ASSERT_CHECKING - while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) - i++; - Assert(i == colinfo->num_cols); - Assert(j == nnewcolumns); -#endif - - /* - * For a named join, print column aliases if we changed any from the child - * names. Unnamed joins cannot print aliases. - */ - if (rte->alias != NULL) - colinfo->printaliases = changed_any; - else - colinfo->printaliases = false; -} - -/* - * colname_is_unique: is colname distinct from already-chosen column names? 
- * - * dpns is query-wide info, colinfo is for the column's RTE - */ -static bool -colname_is_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo) -{ - int i; - ListCell *lc; - - /* Check against already-assigned column aliases within RTE */ - for (i = 0; i < colinfo->num_cols; i++) - { - char *oldname = colinfo->colnames[i]; - - if (oldname && strcmp(oldname, colname) == 0) - return false; - } - - /* - * If we're building a new_colnames array, check that too (this will be - * partially but not completely redundant with the previous checks) - */ - for (i = 0; i < colinfo->num_new_cols; i++) - { - char *oldname = colinfo->new_colnames[i]; - - if (oldname && strcmp(oldname, colname) == 0) - return false; - } - - /* Also check against USING-column names that must be globally unique */ - foreach(lc, dpns->using_names) - { - char *oldname = (char *) lfirst(lc); - - if (strcmp(oldname, colname) == 0) - return false; - } - - /* Also check against names already assigned for parent-join USING cols */ - foreach(lc, colinfo->parentUsing) - { - char *oldname = (char *) lfirst(lc); - - if (strcmp(oldname, colname) == 0) - return false; - } - - return true; -} - -/* - * make_colname_unique: modify colname if necessary to make it unique - * - * dpns is query-wide info, colinfo is for the column's RTE - */ -static char * -make_colname_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo) -{ - /* - * If the selected name isn't unique, append digits to make it so. For a - * very long input name, we might have to truncate to stay within - * NAMEDATALEN. - */ - if (!colname_is_unique(colname, dpns, colinfo)) - { - int colnamelen = strlen(colname); - char *modname = (char *) palloc(colnamelen + 16); - int i = 0; - - do - { - i++; - for (;;) - { - /* - * We avoid using %.*s here because it can misbehave if the - * data is not valid in what libc thinks is the prevailing - * encoding. 
- */ - memcpy(modname, colname, colnamelen); - sprintf(modname + colnamelen, "_%d", i); - if (strlen(modname) < NAMEDATALEN) - break; - /* drop chars from colname to keep all the digits */ - colnamelen = pg_mbcliplen(colname, colnamelen, - colnamelen - 1); - } - } while (!colname_is_unique(modname, dpns, colinfo)); - colname = modname; - } - return colname; -} - -/* - * expand_colnames_array_to: make colinfo->colnames at least n items long - * - * Any added array entries are initialized to zero. - */ -static void -expand_colnames_array_to(deparse_columns *colinfo, int n) -{ - if (n > colinfo->num_cols) - { - if (colinfo->colnames == NULL) - colinfo->colnames = (char **) palloc0(n * sizeof(char *)); - else - { - colinfo->colnames = (char **) repalloc(colinfo->colnames, - n * sizeof(char *)); - memset(colinfo->colnames + colinfo->num_cols, 0, - (n - colinfo->num_cols) * sizeof(char *)); - } - colinfo->num_cols = n; - } -} - -/* - * identify_join_columns: figure out where columns of a join come from - * - * Fills the join-specific fields of the colinfo struct, except for - * usingNames which is filled later. 
- */ -static void -identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, - deparse_columns *colinfo) -{ - int numjoincols; - int i; - ListCell *lc; - - /* Extract left/right child RT indexes */ - if (IsA(j->larg, RangeTblRef)) - colinfo->leftrti = ((RangeTblRef *) j->larg)->rtindex; - else if (IsA(j->larg, JoinExpr)) - colinfo->leftrti = ((JoinExpr *) j->larg)->rtindex; - else - elog(ERROR, "unrecognized node type in jointree: %d", - (int) nodeTag(j->larg)); - if (IsA(j->rarg, RangeTblRef)) - colinfo->rightrti = ((RangeTblRef *) j->rarg)->rtindex; - else if (IsA(j->rarg, JoinExpr)) - colinfo->rightrti = ((JoinExpr *) j->rarg)->rtindex; - else - elog(ERROR, "unrecognized node type in jointree: %d", - (int) nodeTag(j->rarg)); - - /* Assert children will be processed earlier than join in second pass */ - Assert(colinfo->leftrti < j->rtindex); - Assert(colinfo->rightrti < j->rtindex); - - /* Initialize result arrays with zeroes */ - numjoincols = list_length(jrte->joinaliasvars); - Assert(numjoincols == list_length(jrte->eref->colnames)); - colinfo->leftattnos = (int *) palloc0(numjoincols * sizeof(int)); - colinfo->rightattnos = (int *) palloc0(numjoincols * sizeof(int)); - - /* Scan the joinaliasvars list to identify simple column references */ - i = 0; - foreach(lc, jrte->joinaliasvars) - { - Var *aliasvar = (Var *) lfirst(lc); - - /* get rid of any implicit coercion above the Var */ - aliasvar = (Var *) strip_implicit_coercions((Node *) aliasvar); - - if (aliasvar == NULL) - { - /* It's a dropped column; nothing to do here */ - } - else if (IsA(aliasvar, Var)) - { - Assert(aliasvar->varlevelsup == 0); - Assert(aliasvar->varattno != 0); - if (aliasvar->varno == colinfo->leftrti) - colinfo->leftattnos[i] = aliasvar->varattno; - else if (aliasvar->varno == colinfo->rightrti) - colinfo->rightattnos[i] = aliasvar->varattno; - else - elog(ERROR, "unexpected varno %d in JOIN RTE", - aliasvar->varno); - } - else if (IsA(aliasvar, CoalesceExpr)) - { - /* - * It's a merged 
column in FULL JOIN USING. Ignore it for now and - * let the code below identify the merged columns. - */ - } - else - elog(ERROR, "unrecognized node type in join alias vars: %d", - (int) nodeTag(aliasvar)); - - i++; - } - - /* - * If there's a USING clause, deconstruct the join quals to identify the - * merged columns. This is a tad painful but if we cannot rely on the - * column names, there is no other representation of which columns were - * joined by USING. (Unless the join type is FULL, we can't tell from the - * joinaliasvars list which columns are merged.) Note: we assume that the - * merged columns are the first output column(s) of the join. - */ - if (j->usingClause) - { - List *leftvars = NIL; - List *rightvars = NIL; - ListCell *lc2; - - /* Extract left- and right-side Vars from the qual expression */ - flatten_join_using_qual(j->quals, &leftvars, &rightvars); - Assert(list_length(leftvars) == list_length(j->usingClause)); - Assert(list_length(rightvars) == list_length(j->usingClause)); - - /* Mark the output columns accordingly */ - i = 0; - forboth(lc, leftvars, lc2, rightvars) - { - Var *leftvar = (Var *) lfirst(lc); - Var *rightvar = (Var *) lfirst(lc2); - - Assert(leftvar->varlevelsup == 0); - Assert(leftvar->varattno != 0); - if (leftvar->varno != colinfo->leftrti) - elog(ERROR, "unexpected varno %d in JOIN USING qual", - leftvar->varno); - colinfo->leftattnos[i] = leftvar->varattno; - - Assert(rightvar->varlevelsup == 0); - Assert(rightvar->varattno != 0); - if (rightvar->varno != colinfo->rightrti) - elog(ERROR, "unexpected varno %d in JOIN USING qual", - rightvar->varno); - colinfo->rightattnos[i] = rightvar->varattno; - - i++; - } - } -} - -/* - * flatten_join_using_qual: extract Vars being joined from a JOIN/USING qual - * - * We assume that transformJoinUsingClause won't have produced anything except - * AND nodes, equality operator nodes, and possibly implicit coercions, and - * that the AND node inputs match left-to-right with the original 
USING list. - * - * Caller must initialize the result lists to NIL. - */ -static void -flatten_join_using_qual(Node *qual, List **leftvars, List **rightvars) -{ - if (IsA(qual, BoolExpr)) - { - /* Handle AND nodes by recursion */ - BoolExpr *b = (BoolExpr *) qual; - ListCell *lc; - - Assert(b->boolop == AND_EXPR); - foreach(lc, b->args) - { - flatten_join_using_qual((Node *) lfirst(lc), - leftvars, rightvars); - } - } - else if (IsA(qual, OpExpr)) - { - /* Otherwise we should have an equality operator */ - OpExpr *op = (OpExpr *) qual; - Var *var; - - if (list_length(op->args) != 2) - elog(ERROR, "unexpected unary operator in JOIN/USING qual"); - /* Arguments should be Vars with perhaps implicit coercions */ - var = (Var *) strip_implicit_coercions((Node *) linitial(op->args)); - if (!IsA(var, Var)) - elog(ERROR, "unexpected node type in JOIN/USING qual: %d", - (int) nodeTag(var)); - *leftvars = lappend(*leftvars, var); - var = (Var *) strip_implicit_coercions((Node *) lsecond(op->args)); - if (!IsA(var, Var)) - elog(ERROR, "unexpected node type in JOIN/USING qual: %d", - (int) nodeTag(var)); - *rightvars = lappend(*rightvars, var); - } - else - { - /* Perhaps we have an implicit coercion to boolean? */ - Node *q = strip_implicit_coercions(qual); - - if (q != qual) - flatten_join_using_qual(q, leftvars, rightvars); - else - elog(ERROR, "unexpected node type in JOIN/USING qual: %d", - (int) nodeTag(qual)); - } -} - -/* - * get_rtable_name: convenience function to get a previously assigned RTE alias - * - * The RTE must belong to the topmost namespace level in "context". 
- */ -static char * -get_rtable_name(int rtindex, deparse_context *context) -{ - deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); - - Assert(rtindex > 0 && rtindex <= list_length(dpns->rtable_names)); - return (char *) list_nth(dpns->rtable_names, rtindex - 1); -} - -/* - * set_deparse_planstate: set up deparse_namespace to parse subexpressions - * of a given PlanState node - * - * This sets the planstate, outer_planstate, inner_planstate, outer_tlist, - * inner_tlist, and index_tlist fields. Caller is responsible for adjusting - * the ancestors list if necessary. Note that the rtable and ctes fields do - * not need to change when shifting attention to different plan nodes in a - * single plan tree. - */ -static void -set_deparse_planstate(deparse_namespace *dpns, PlanState *ps) -{ - dpns->planstate = ps; - - /* - * We special-case Append and MergeAppend to pretend that the first child - * plan is the OUTER referent; we have to interpret OUTER Vars in their - * tlists according to one of the children, and the first one is the most - * natural choice. Likewise special-case ModifyTable to pretend that the - * first child plan is the OUTER referent; this is to support RETURNING - * lists containing references to non-target relations. - */ - if (IsA(ps, AppendState)) - dpns->outer_planstate = ((AppendState *) ps)->appendplans[0]; - else if (IsA(ps, MergeAppendState)) - dpns->outer_planstate = ((MergeAppendState *) ps)->mergeplans[0]; - else if (IsA(ps, ModifyTableState)) - dpns->outer_planstate = ((ModifyTableState *) ps)->mt_plans[0]; - else - dpns->outer_planstate = outerPlanState(ps); - - if (dpns->outer_planstate) - dpns->outer_tlist = dpns->outer_planstate->plan->targetlist; - else - dpns->outer_tlist = NIL; - - /* - * For a SubqueryScan, pretend the subplan is INNER referent. (We don't - * use OUTER because that could someday conflict with the normal meaning.) 
- * Likewise, for a CteScan, pretend the subquery's plan is INNER referent. - * For ON CONFLICT .. UPDATE we just need the inner tlist to point to the - * excluded expression's tlist. (Similar to the SubqueryScan we don't want - * to reuse OUTER, it's used for RETURNING in some modify table cases, - * although not INSERT .. CONFLICT). - */ - if (IsA(ps, SubqueryScanState)) - dpns->inner_planstate = ((SubqueryScanState *) ps)->subplan; - else if (IsA(ps, CteScanState)) - dpns->inner_planstate = ((CteScanState *) ps)->cteplanstate; - else if (IsA(ps, ModifyTableState)) - dpns->inner_planstate = ps; - else - dpns->inner_planstate = innerPlanState(ps); - - if (IsA(ps, ModifyTableState)) - dpns->inner_tlist = ((ModifyTableState *) ps)->mt_excludedtlist; - else if (dpns->inner_planstate) - dpns->inner_tlist = dpns->inner_planstate->plan->targetlist; - else - dpns->inner_tlist = NIL; - - /* Set up referent for INDEX_VAR Vars, if needed */ - if (IsA(ps->plan, IndexOnlyScan)) - dpns->index_tlist = ((IndexOnlyScan *) ps->plan)->indextlist; - else if (IsA(ps->plan, ForeignScan)) - dpns->index_tlist = ((ForeignScan *) ps->plan)->fdw_scan_tlist; - else if (IsA(ps->plan, CustomScan)) - dpns->index_tlist = ((CustomScan *) ps->plan)->custom_scan_tlist; - else - dpns->index_tlist = NIL; -} - -/* - * push_child_plan: temporarily transfer deparsing attention to a child plan - * - * When expanding an OUTER_VAR or INNER_VAR reference, we must adjust the - * deparse context in case the referenced expression itself uses - * OUTER_VAR/INNER_VAR. We modify the top stack entry in-place to avoid - * affecting levelsup issues (although in a Plan tree there really shouldn't - * be any). - * - * Caller must provide a local deparse_namespace variable to save the - * previous state for pop_child_plan. 
- */ -static void -push_child_plan(deparse_namespace *dpns, PlanState *ps, - deparse_namespace *save_dpns) -{ - /* Save state for restoration later */ - *save_dpns = *dpns; - - /* Link current plan node into ancestors list */ - dpns->ancestors = lcons(dpns->planstate, dpns->ancestors); - - /* Set attention on selected child */ - set_deparse_planstate(dpns, ps); -} - -/* - * pop_child_plan: undo the effects of push_child_plan - */ -static void -pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) -{ - List *ancestors; - - /* Get rid of ancestors list cell added by push_child_plan */ - ancestors = list_delete_first(dpns->ancestors); - - /* Restore fields changed by push_child_plan */ - *dpns = *save_dpns; - - /* Make sure dpns->ancestors is right (may be unnecessary) */ - dpns->ancestors = ancestors; -} - -/* - * push_ancestor_plan: temporarily transfer deparsing attention to an - * ancestor plan - * - * When expanding a Param reference, we must adjust the deparse context - * to match the plan node that contains the expression being printed; - * otherwise we'd fail if that expression itself contains a Param or - * OUTER_VAR/INNER_VAR/INDEX_VAR variable. - * - * The target ancestor is conveniently identified by the ListCell holding it - * in dpns->ancestors. - * - * Caller must provide a local deparse_namespace variable to save the - * previous state for pop_ancestor_plan. 
- */ -static void -push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, - deparse_namespace *save_dpns) -{ - PlanState *ps = (PlanState *) lfirst(ancestor_cell); - List *ancestors; - - /* Save state for restoration later */ - *save_dpns = *dpns; - - /* Build a new ancestor list with just this node's ancestors */ - ancestors = NIL; - while ((ancestor_cell = lnext(ancestor_cell)) != NULL) - ancestors = lappend(ancestors, lfirst(ancestor_cell)); - dpns->ancestors = ancestors; - - /* Set attention on selected ancestor */ - set_deparse_planstate(dpns, ps); -} - -/* - * pop_ancestor_plan: undo the effects of push_ancestor_plan - */ -static void -pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) -{ - /* Free the ancestor list made in push_ancestor_plan */ - list_free(dpns->ancestors); - - /* Restore fields changed by push_ancestor_plan */ - *dpns = *save_dpns; -} - - -/* ---------- - * deparse_shard_query - Parse back a query for execution on a shard - * - * Builds an SQL string to perform the provided query on a specific shard and - * places this string into the provided buffer. - * ---------- - */ -void -deparse_shard_query(Query *query, Oid distrelid, int64 shardid, - StringInfo buffer) -{ - get_query_def_extended(query, buffer, NIL, distrelid, shardid, NULL, 0, - WRAP_COLUMN_DEFAULT, 0); -} - - -/* ---------- - * get_query_def - Parse back one query parsetree - * - * If resultDesc is not NULL, then it is the output tuple descriptor for - * the view represented by a SELECT query. - * ---------- - */ -static void -get_query_def(Query *query, StringInfo buf, List *parentnamespace, - TupleDesc resultDesc, - int prettyFlags, int wrapColumn, int startIndent) -{ - get_query_def_extended(query, buf, parentnamespace, InvalidOid, 0, resultDesc, - prettyFlags, wrapColumn, startIndent); -} - - -/* ---------- - * get_query_def_extended - Parse back one query parsetree, optionally - * with extension using a shard identifier. 
- * - * If distrelid is valid and shardid is positive, the provided shardid is added - * any time the provided relid is deparsed, so that the query may be executed - * on a placement for the given shard. - * ---------- - */ -static void -get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace, - Oid distrelid, int64 shardid, TupleDesc resultDesc, - int prettyFlags, int wrapColumn, int startIndent) -{ - deparse_context context; - deparse_namespace dpns; - - OverrideSearchPath *overridePath = NULL; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - /* - * Before we begin to examine the query, acquire locks on referenced - * relations, and fix up deleted columns in JOIN RTEs. This ensures - * consistent results. Note we assume it's OK to scribble on the passed - * querytree! - * - * We are only deparsing the query (we are not about to execute it), so we - * only need AccessShareLock on the relations it mentions. - */ - AcquireRewriteLocks(query, false, false); - - /* - * Set search_path to NIL so that all objects outside of pg_catalog will be - * schema-prefixed. 
pg_catalog will be added automatically when we call - * PushOverrideSearchPath(), since we set addCatalog to true; - */ - overridePath = GetOverrideSearchPath(CurrentMemoryContext); - overridePath->schemas = NIL; - overridePath->addCatalog = true; - PushOverrideSearchPath(overridePath); - - context.buf = buf; - context.namespaces = lcons(&dpns, list_copy(parentnamespace)); - context.windowClause = NIL; - context.windowTList = NIL; - context.varprefix = (parentnamespace != NIL || - list_length(query->rtable) != 1); - context.prettyFlags = prettyFlags; - context.wrapColumn = wrapColumn; - context.indentLevel = startIndent; - context.special_exprkind = EXPR_KIND_NONE; - context.distrelid = distrelid; - context.shardid = shardid; - - set_deparse_for_query(&dpns, query, parentnamespace); - - switch (query->commandType) - { - case CMD_SELECT: - get_select_query_def(query, &context, resultDesc); - break; - - case CMD_UPDATE: - get_update_query_def(query, &context); - break; - - case CMD_INSERT: - get_insert_query_def(query, &context); - break; - - case CMD_DELETE: - get_delete_query_def(query, &context); - break; - - case CMD_NOTHING: - appendStringInfoString(buf, "NOTHING"); - break; - - case CMD_UTILITY: - get_utility_query_def(query, &context); - break; - - default: - elog(ERROR, "unrecognized query command type: %d", - query->commandType); - break; - } - - /* revert back to original search_path */ - PopOverrideSearchPath(); -} - -/* ---------- - * get_values_def - Parse back a VALUES list - * ---------- - */ -static void -get_values_def(List *values_lists, deparse_context *context) -{ - StringInfo buf = context->buf; - bool first_list = true; - ListCell *vtl; - - appendStringInfoString(buf, "VALUES "); - - foreach(vtl, values_lists) - { - List *sublist = (List *) lfirst(vtl); - bool first_col = true; - ListCell *lc; - - if (first_list) - first_list = false; - else - appendStringInfoString(buf, ", "); - - appendStringInfoChar(buf, '('); - foreach(lc, sublist) - { - 
Node *col = (Node *) lfirst(lc); - - if (first_col) - first_col = false; - else - appendStringInfoChar(buf, ','); - - /* - * Print the value. Whole-row Vars need special treatment. - */ - get_rule_expr_toplevel(col, context, false); - } - appendStringInfoChar(buf, ')'); - } -} - -/* ---------- - * get_with_clause - Parse back a WITH clause - * ---------- - */ -static void -get_with_clause(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - if (query->cteList == NIL) - return; - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - - if (query->hasRecursive) - sep = "WITH RECURSIVE "; - else - sep = "WITH "; - foreach(l, query->cteList) - { - CommonTableExpr *cte = (CommonTableExpr *) lfirst(l); - - appendStringInfoString(buf, sep); - appendStringInfoString(buf, quote_identifier(cte->ctename)); - if (cte->aliascolnames) - { - bool first = true; - ListCell *col; - - appendStringInfoChar(buf, '('); - foreach(col, cte->aliascolnames) - { - if (first) - first = false; - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, - quote_identifier(strVal(lfirst(col)))); - } - appendStringInfoChar(buf, ')'); - } - appendStringInfoString(buf, " AS ("); - if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", 0, 0, 0); - get_query_def((Query *) cte->ctequery, buf, context->namespaces, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", 0, 0, 0); - appendStringInfoChar(buf, ')'); - sep = ", "; - } - - if (PRETTY_INDENT(context)) - { - context->indentLevel -= PRETTYINDENT_STD; - appendContextKeyword(context, "", 0, 0, 0); - } - else - appendStringInfoChar(buf, ' '); -} - -/* ---------- - * get_select_query_def - Parse back a SELECT parsetree - * ---------- - */ -static void -get_select_query_def(Query *query, deparse_context *context, - 
TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - List *save_windowclause; - List *save_windowtlist; - bool force_colno; - ListCell *l; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* Set up context for possible window functions */ - save_windowclause = context->windowClause; - context->windowClause = query->windowClause; - save_windowtlist = context->windowTList; - context->windowTList = query->targetList; - - /* - * If the Query node has a setOperations tree, then it's the top level of - * a UNION/INTERSECT/EXCEPT query; only the WITH, ORDER BY and LIMIT - * fields are interesting in the top query itself. - */ - if (query->setOperations) - { - get_setop_query(query->setOperations, query, context, resultDesc); - /* ORDER BY clauses must be simple in this case */ - force_colno = true; - } - else - { - get_basic_select_query(query, context, resultDesc); - force_colno = false; - } - - /* Add the ORDER BY clause if given */ - if (query->sortClause != NIL) - { - appendContextKeyword(context, " ORDER BY ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_orderby(query->sortClause, query->targetList, - force_colno, context); - } - - /* Add the LIMIT clause if given */ - if (query->limitOffset != NULL) - { - appendContextKeyword(context, " OFFSET ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->limitOffset, context, false); - } - if (query->limitCount != NULL) - { - appendContextKeyword(context, " LIMIT ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - if (IsA(query->limitCount, Const) && - ((Const *) query->limitCount)->constisnull) - appendStringInfoString(buf, "ALL"); - else - get_rule_expr(query->limitCount, context, false); - } - - /* Add FOR [KEY] UPDATE/SHARE clauses if present */ - if (query->hasForUpdate) - { - foreach(l, query->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(l); - - /* don't print implicit clauses */ - if (rc->pushedDown) - continue; - - switch (rc->strength) - { - 
case LCS_NONE: - /* we intentionally throw an error for LCS_NONE */ - elog(ERROR, "unrecognized LockClauseStrength %d", - (int) rc->strength); - break; - case LCS_FORKEYSHARE: - appendContextKeyword(context, " FOR KEY SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORSHARE: - appendContextKeyword(context, " FOR SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORNOKEYUPDATE: - appendContextKeyword(context, " FOR NO KEY UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORUPDATE: - appendContextKeyword(context, " FOR UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - } - - appendStringInfo(buf, " OF %s", - quote_identifier(get_rtable_name(rc->rti, - context))); - if (rc->waitPolicy == LockWaitError) - appendStringInfoString(buf, " NOWAIT"); - else if (rc->waitPolicy == LockWaitSkip) - appendStringInfoString(buf, " SKIP LOCKED"); - } - } - - context->windowClause = save_windowclause; - context->windowTList = save_windowtlist; -} - -/* - * Detect whether query looks like SELECT ... FROM VALUES(); - * if so, return the VALUES RTE. Otherwise return NULL. - */ -static RangeTblEntry * -get_simple_values_rte(Query *query) -{ - RangeTblEntry *result = NULL; - ListCell *lc; - - /* - * We want to return TRUE even if the Query also contains OLD or NEW rule - * RTEs. So the idea is to scan the rtable and see if there is only one - * inFromCl RTE that is a VALUES RTE. 
- */ - foreach(lc, query->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - - if (rte->rtekind == RTE_VALUES && rte->inFromCl) - { - if (result) - return NULL; /* multiple VALUES (probably not possible) */ - result = rte; - } - else if (rte->rtekind == RTE_RELATION && !rte->inFromCl) - continue; /* ignore rule entries */ - else - return NULL; /* something else -> not simple VALUES */ - } - - /* - * We don't need to check the targetlist in any great detail, because - * parser/analyze.c will never generate a "bare" VALUES RTE --- they only - * appear inside auto-generated sub-queries with very restricted - * structure. However, DefineView might have modified the tlist by - * injecting new column aliases; so compare tlist resnames against the - * RTE's names to detect that. - */ - if (result) - { - ListCell *lcn; - - if (list_length(query->targetList) != list_length(result->eref->colnames)) - return NULL; /* this probably cannot happen */ - forboth(lc, query->targetList, lcn, result->eref->colnames) - { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - char *cname = strVal(lfirst(lcn)); - - if (tle->resjunk) - return NULL; /* this probably cannot happen */ - if (tle->resname == NULL || strcmp(tle->resname, cname) != 0) - return NULL; /* column name has been changed */ - } - } - - return result; -} - -static void -get_basic_select_query(Query *query, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - RangeTblEntry *values_rte; - char *sep; - ListCell *l; - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - - /* - * If the query looks like SELECT * FROM (VALUES ...), then print just the - * VALUES part. This reverses what transformValuesClause() did at parse - * time. 
- */ - values_rte = get_simple_values_rte(query); - if (values_rte) - { - get_values_def(values_rte->values_lists, context); - return; - } - - /* - * Build up the query string - first we say SELECT - */ - appendStringInfoString(buf, "SELECT"); - - /* Add the DISTINCT clause if given */ - if (query->distinctClause != NIL) - { - if (query->hasDistinctOn) - { - appendStringInfoString(buf, " DISTINCT ON ("); - sep = ""; - foreach(l, query->distinctClause) - { - SortGroupClause *srt = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(srt->tleSortGroupRef, query->targetList, - false, context); - sep = ", "; - } - appendStringInfoChar(buf, ')'); - } - else - appendStringInfoString(buf, " DISTINCT"); - } - - /* Then we tell what to select (the targetlist) */ - get_target_list(query->targetList, context, resultDesc); - - /* Add the FROM clause if needed */ - get_from_clause(query, " FROM ", context); - - /* Add the WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add the GROUP BY clause if given */ - if (query->groupClause != NULL || query->groupingSets != NULL) - { - ParseExprKind save_exprkind; - - appendContextKeyword(context, " GROUP BY ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - - save_exprkind = context->special_exprkind; - context->special_exprkind = EXPR_KIND_GROUP_BY; - - if (query->groupingSets == NIL) - { - sep = ""; - foreach(l, query->groupClause) - { - SortGroupClause *grp = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, query->targetList, - false, context); - sep = ", "; - } - } - else - { - sep = ""; - foreach(l, query->groupingSets) - { - GroupingSet *grp = lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_groupingset(grp, query->targetList, true, context); - 
sep = ", "; - } - } - - context->special_exprkind = save_exprkind; - } - - /* Add the HAVING clause if given */ - if (query->havingQual != NULL) - { - appendContextKeyword(context, " HAVING ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->havingQual, context, false); - } - - /* Add the WINDOW clause if needed */ - if (query->windowClause != NIL) - get_rule_windowclause(query, context); -} - -/* ---------- - * get_target_list - Parse back a SELECT target list - * - * This is also used for RETURNING lists in INSERT/UPDATE/DELETE. - * ---------- - */ -static void -get_target_list(List *targetList, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - StringInfoData targetbuf; - bool last_was_multiline = false; - char *sep; - int colno; - ListCell *l; - - /* we use targetbuf to hold each TLE's text temporarily */ - initStringInfo(&targetbuf); - - sep = " "; - colno = 0; - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - char *colname; - char *attname; - - if (tle->resjunk) - continue; /* ignore junk entries */ - - appendStringInfoString(buf, sep); - sep = ", "; - colno++; - - /* - * Put the new field text into targetbuf so we can decide after we've - * got it whether or not it needs to go on a new line. - */ - resetStringInfo(&targetbuf); - context->buf = &targetbuf; - - /* - * We special-case Var nodes rather than using get_rule_expr. This is - * needed because get_rule_expr will display a whole-row Var as - * "foo.*", which is the preferred notation in most contexts, but at - * the top level of a SELECT list it's not right (the parser will - * expand that notation into multiple columns, yielding behavior - * different from a whole-row Var). We need to call get_variable - * directly so that we can tell it to do the right thing, and so that - * we can get the attribute name which is the default AS label. 
- */ - if (tle->expr && (IsA(tle->expr, Var))) - { - attname = get_variable((Var *) tle->expr, 0, true, context); - } - else - { - get_rule_expr((Node *) tle->expr, context, true); - /* We'll show the AS name unless it's this: */ - attname = "?column?"; - } - - /* - * Figure out what the result column should be called. In the context - * of a view, use the view's tuple descriptor (so as to pick up the - * effects of any column RENAME that's been done on the view). - * Otherwise, just use what we can find in the TLE. - */ - if (resultDesc && colno <= resultDesc->natts) - colname = NameStr(resultDesc->attrs[colno - 1]->attname); - else - colname = tle->resname; - - /* Show AS unless the column's name is correct as-is */ - if (colname) /* resname could be NULL */ - { - if (attname == NULL || strcmp(attname, colname) != 0) - appendStringInfo(&targetbuf, " AS %s", quote_identifier(colname)); - } - - /* Restore context's output buffer */ - context->buf = buf; - - /* Consider line-wrapping if enabled */ - if (PRETTY_INDENT(context) && context->wrapColumn >= 0) - { - int leading_nl_pos; - - /* Does the new field start with a new line? */ - if (targetbuf.len > 0 && targetbuf.data[0] == '\n') - leading_nl_pos = 0; - else - leading_nl_pos = -1; - - /* If so, we shouldn't add anything */ - if (leading_nl_pos >= 0) - { - /* instead, remove any trailing spaces currently in buf */ - removeStringInfoSpaces(buf); - } - else - { - char *trailing_nl; - - /* Locate the start of the current line in the output buffer */ - trailing_nl = strrchr(buf->data, '\n'); - if (trailing_nl == NULL) - trailing_nl = buf->data; - else - trailing_nl++; - - /* - * Add a newline, plus some indentation, if the new field is - * not the first and either the new field would cause an - * overflow or the last field used more than one line. 
- */ - if (colno > 1 && - ((strlen(trailing_nl) + targetbuf.len > context->wrapColumn) || - last_was_multiline)) - appendContextKeyword(context, "", -PRETTYINDENT_STD, - PRETTYINDENT_STD, PRETTYINDENT_VAR); - } - - /* Remember this field's multiline status for next iteration */ - last_was_multiline = - (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL); - } - - /* Add the new field */ - appendStringInfoString(buf, targetbuf.data); - } - - /* clean up */ - pfree(targetbuf.data); -} - -static void -get_setop_query(Node *setOp, Query *query, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - bool need_paren; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - if (IsA(setOp, RangeTblRef)) - { - RangeTblRef *rtr = (RangeTblRef *) setOp; - RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable); - Query *subquery = rte->subquery; - - Assert(subquery != NULL); - Assert(subquery->setOperations == NULL); - /* Need parens if WITH, ORDER BY, FOR UPDATE, or LIMIT; see gram.y */ - need_paren = (subquery->cteList || - subquery->sortClause || - subquery->rowMarks || - subquery->limitOffset || - subquery->limitCount); - if (need_paren) - appendStringInfoChar(buf, '('); - get_query_def(subquery, buf, context->namespaces, resultDesc, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - if (need_paren) - appendStringInfoChar(buf, ')'); - } - else if (IsA(setOp, SetOperationStmt)) - { - SetOperationStmt *op = (SetOperationStmt *) setOp; - int subindent; - - /* - * We force parens when nesting two SetOperationStmts, except when the - * lefthand input is another setop of the same kind. Syntactically, - * we could omit parens in rather more cases, but it seems best to use - * parens to flag cases where the setop operator changes. If we use - * parens, we also increase the indentation level for the child query. 
- * - * There are some cases in which parens are needed around a leaf query - * too, but those are more easily handled at the next level down (see - * code above). - */ - if (IsA(op->larg, SetOperationStmt)) - { - SetOperationStmt *lop = (SetOperationStmt *) op->larg; - - if (op->op == lop->op && op->all == lop->all) - need_paren = false; - else - need_paren = true; - } - else - need_paren = false; - - if (need_paren) - { - appendStringInfoChar(buf, '('); - subindent = PRETTYINDENT_STD; - appendContextKeyword(context, "", subindent, 0, 0); - } - else - subindent = 0; - - get_setop_query(op->larg, query, context, resultDesc); - - if (need_paren) - appendContextKeyword(context, ") ", -subindent, 0, 0); - else if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", -subindent, 0, 0); - else - appendStringInfoChar(buf, ' '); - - switch (op->op) - { - case SETOP_UNION: - appendStringInfoString(buf, "UNION "); - break; - case SETOP_INTERSECT: - appendStringInfoString(buf, "INTERSECT "); - break; - case SETOP_EXCEPT: - appendStringInfoString(buf, "EXCEPT "); - break; - default: - elog(ERROR, "unrecognized set op: %d", - (int) op->op); - } - if (op->all) - appendStringInfoString(buf, "ALL "); - - /* Always parenthesize if RHS is another setop */ - need_paren = IsA(op->rarg, SetOperationStmt); - - /* - * The indentation code here is deliberately a bit different from that - * for the lefthand input, because we want the line breaks in - * different places. - */ - if (need_paren) - { - appendStringInfoChar(buf, '('); - subindent = PRETTYINDENT_STD; - } - else - subindent = 0; - appendContextKeyword(context, "", subindent, 0, 0); - - get_setop_query(op->rarg, query, context, resultDesc); - - if (PRETTY_INDENT(context)) - context->indentLevel -= subindent; - if (need_paren) - appendContextKeyword(context, ")", 0, 0, 0); - } - else - { - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(setOp)); - } -} - -/* - * Display a sort/group clause. 
- * - * Also returns the expression tree, so caller need not find it again. - */ -static Node * -get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno, - deparse_context *context) -{ - StringInfo buf = context->buf; - TargetEntry *tle; - Node *expr; - - tle = get_sortgroupref_tle(ref, tlist); - expr = (Node *) tle->expr; - - /* - * Use column-number form if requested by caller. Otherwise, if - * expression is a constant, force it to be dumped with an explicit cast - * as decoration --- this is because a simple integer constant is - * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we - * dump it without any decoration. If it's anything more complex than a - * simple Var, then force extra parens around it, to ensure it can't be - * misinterpreted as a cube() or rollup() construct. - */ - if (force_colno) - { - Assert(!tle->resjunk); - appendStringInfo(buf, "%d", tle->resno); - } - else if (expr && IsA(expr, Const)) - get_const_expr((Const *) expr, context, 1); - else if (!expr || IsA(expr, Var)) - get_rule_expr(expr, context, true); - else - { - /* - * We must force parens for function-like expressions even if - * PRETTY_PAREN is off, since those are the ones in danger of - * misparsing. For other expressions we need to force them only if - * PRETTY_PAREN is on, since otherwise the expression will output them - * itself. (We can't skip the parens.) 
- */ - bool need_paren = (PRETTY_PAREN(context) - || IsA(expr, FuncExpr) - ||IsA(expr, Aggref) - ||IsA(expr, WindowFunc)); - - if (need_paren) - appendStringInfoString(context->buf, "("); - get_rule_expr(expr, context, true); - if (need_paren) - appendStringInfoString(context->buf, ")"); - } - - return expr; -} - -/* - * Display a GroupingSet - */ -static void -get_rule_groupingset(GroupingSet *gset, List *targetlist, - bool omit_parens, deparse_context *context) -{ - ListCell *l; - StringInfo buf = context->buf; - bool omit_child_parens = true; - char *sep = ""; - - switch (gset->kind) - { - case GROUPING_SET_EMPTY: - appendStringInfoString(buf, "()"); - return; - - case GROUPING_SET_SIMPLE: - { - if (!omit_parens || list_length(gset->content) != 1) - appendStringInfoString(buf, "("); - - foreach(l, gset->content) - { - Index ref = lfirst_int(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(ref, targetlist, - false, context); - sep = ", "; - } - - if (!omit_parens || list_length(gset->content) != 1) - appendStringInfoString(buf, ")"); - } - return; - - case GROUPING_SET_ROLLUP: - appendStringInfoString(buf, "ROLLUP("); - break; - case GROUPING_SET_CUBE: - appendStringInfoString(buf, "CUBE("); - break; - case GROUPING_SET_SETS: - appendStringInfoString(buf, "GROUPING SETS ("); - omit_child_parens = false; - break; - } - - foreach(l, gset->content) - { - appendStringInfoString(buf, sep); - get_rule_groupingset(lfirst(l), targetlist, omit_child_parens, context); - sep = ", "; - } - - appendStringInfoString(buf, ")"); -} - -/* - * Display an ORDER BY list. 
- */ -static void -get_rule_orderby(List *orderList, List *targetList, - bool force_colno, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - sep = ""; - foreach(l, orderList) - { - SortGroupClause *srt = (SortGroupClause *) lfirst(l); - Node *sortexpr; - Oid sortcoltype; - TypeCacheEntry *typentry; - - appendStringInfoString(buf, sep); - sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, - force_colno, context); - sortcoltype = exprType(sortexpr); - /* See whether operator is default < or > for datatype */ - typentry = lookup_type_cache(sortcoltype, - TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); - if (srt->sortop == typentry->lt_opr) - { - /* ASC is default, so emit nothing for it */ - if (srt->nulls_first) - appendStringInfoString(buf, " NULLS FIRST"); - } - else if (srt->sortop == typentry->gt_opr) - { - appendStringInfoString(buf, " DESC"); - /* DESC defaults to NULLS FIRST */ - if (!srt->nulls_first) - appendStringInfoString(buf, " NULLS LAST"); - } - else - { - appendStringInfo(buf, " USING %s", - generate_operator_name(srt->sortop, - sortcoltype, - sortcoltype)); - /* be specific to eliminate ambiguity */ - if (srt->nulls_first) - appendStringInfoString(buf, " NULLS FIRST"); - else - appendStringInfoString(buf, " NULLS LAST"); - } - sep = ", "; - } -} - -/* - * Display a WINDOW clause. - * - * Note that the windowClause list might contain only anonymous window - * specifications, in which case we should print nothing here. 
- */ -static void -get_rule_windowclause(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - sep = NULL; - foreach(l, query->windowClause) - { - WindowClause *wc = (WindowClause *) lfirst(l); - - if (wc->name == NULL) - continue; /* ignore anonymous windows */ - - if (sep == NULL) - appendContextKeyword(context, " WINDOW ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - else - appendStringInfoString(buf, sep); - - appendStringInfo(buf, "%s AS ", quote_identifier(wc->name)); - - get_rule_windowspec(wc, query->targetList, context); - - sep = ", "; - } -} - -/* - * Display a window definition - */ -static void -get_rule_windowspec(WindowClause *wc, List *targetList, - deparse_context *context) -{ - StringInfo buf = context->buf; - bool needspace = false; - const char *sep; - ListCell *l; - - appendStringInfoChar(buf, '('); - if (wc->refname) - { - appendStringInfoString(buf, quote_identifier(wc->refname)); - needspace = true; - } - /* partition clauses are always inherited, so only print if no refname */ - if (wc->partitionClause && !wc->refname) - { - if (needspace) - appendStringInfoChar(buf, ' '); - appendStringInfoString(buf, "PARTITION BY "); - sep = ""; - foreach(l, wc->partitionClause) - { - SortGroupClause *grp = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, - false, context); - sep = ", "; - } - needspace = true; - } - /* print ordering clause only if not inherited */ - if (wc->orderClause && !wc->copiedOrder) - { - if (needspace) - appendStringInfoChar(buf, ' '); - appendStringInfoString(buf, "ORDER BY "); - get_rule_orderby(wc->orderClause, targetList, false, context); - needspace = true; - } - /* framing clause is never inherited, so print unless it's default */ - if (wc->frameOptions & FRAMEOPTION_NONDEFAULT) - { - if (needspace) - appendStringInfoChar(buf, ' '); - if (wc->frameOptions & FRAMEOPTION_RANGE) - 
appendStringInfoString(buf, "RANGE "); - else if (wc->frameOptions & FRAMEOPTION_ROWS) - appendStringInfoString(buf, "ROWS "); - else - Assert(false); - if (wc->frameOptions & FRAMEOPTION_BETWEEN) - appendStringInfoString(buf, "BETWEEN "); - if (wc->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) - appendStringInfoString(buf, "UNBOUNDED PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_START_CURRENT_ROW) - appendStringInfoString(buf, "CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_START_VALUE) - { - get_rule_expr(wc->startOffset, context, false); - if (wc->frameOptions & FRAMEOPTION_START_VALUE_PRECEDING) - appendStringInfoString(buf, " PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_START_VALUE_FOLLOWING) - appendStringInfoString(buf, " FOLLOWING "); - else - Assert(false); - } - else - Assert(false); - if (wc->frameOptions & FRAMEOPTION_BETWEEN) - { - appendStringInfoString(buf, "AND "); - if (wc->frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING) - appendStringInfoString(buf, "UNBOUNDED FOLLOWING "); - else if (wc->frameOptions & FRAMEOPTION_END_CURRENT_ROW) - appendStringInfoString(buf, "CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_END_VALUE) - { - get_rule_expr(wc->endOffset, context, false); - if (wc->frameOptions & FRAMEOPTION_END_VALUE_PRECEDING) - appendStringInfoString(buf, " PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_END_VALUE_FOLLOWING) - appendStringInfoString(buf, " FOLLOWING "); - else - Assert(false); - } - else - Assert(false); - } - /* we will now have a trailing space; remove it */ - buf->len--; - } - appendStringInfoChar(buf, ')'); -} - -/* ---------- - * get_insert_query_def - Parse back an INSERT parsetree - * ---------- - */ -static void -get_insert_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *select_rte = NULL; - RangeTblEntry *values_rte = NULL; - RangeTblEntry *rte; - char *sep; - ListCell *l; - List *strippedexprs; - - /* 
Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * If it's an INSERT ... SELECT or multi-row VALUES, there will be a - * single RTE for the SELECT or VALUES. Plain VALUES has neither. - */ - foreach(l, query->rtable) - { - rte = (RangeTblEntry *) lfirst(l); - - if (rte->rtekind == RTE_SUBQUERY) - { - if (select_rte) - elog(ERROR, "too many subquery RTEs in INSERT"); - select_rte = rte; - } - - if (rte->rtekind == RTE_VALUES) - { - if (values_rte) - elog(ERROR, "too many values RTEs in INSERT"); - values_rte = rte; - } - } - if (select_rte && values_rte) - elog(ERROR, "both subquery and values RTEs in INSERT"); - - /* - * Start the query with INSERT INTO relname - */ - rte = rt_fetch(query->resultRelation, query->rtable); - Assert(rte->rtekind == RTE_RELATION); - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - appendStringInfo(buf, "INSERT INTO %s ", - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - /* INSERT requires AS keyword for target alias */ - if (rte->alias != NULL) - appendStringInfo(buf, "AS %s ", - quote_identifier(rte->alias->aliasname)); - - /* - * Add the insert-column-names list. Any indirection decoration needed on - * the column names can be inferred from the top targetlist. - */ - strippedexprs = NIL; - sep = ""; - if (query->targetList) - appendStringInfoChar(buf, '('); - foreach(l, query->targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - - if (tle->resjunk) - continue; /* ignore junk entries */ - - appendStringInfoString(buf, sep); - sep = ", "; - - /* - * Put out name of target column; look in the catalogs, not at - * tle->resname, since resname will fail to track RENAME. 
- */ - appendStringInfoString(buf, - quote_identifier(get_relid_attribute_name(rte->relid, - tle->resno))); - - /* - * Print any indirection needed (subfields or subscripts), and strip - * off the top-level nodes representing the indirection assignments. - * Add the stripped expressions to strippedexprs. (If it's a - * single-VALUES statement, the stripped expressions are the VALUES to - * print below. Otherwise they're just Vars and not really - * interesting.) - */ - strippedexprs = lappend(strippedexprs, - processIndirection((Node *) tle->expr, - context)); - } - if (query->targetList) - appendStringInfoString(buf, ") "); - - if (select_rte) - { - /* Add the SELECT */ - get_query_def(select_rte->subquery, buf, NIL, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - } - else if (values_rte) - { - /* Add the multi-VALUES expression lists */ - get_values_def(values_rte->values_lists, context); - } - else if (strippedexprs) - { - /* Add the single-VALUES expression list */ - appendContextKeyword(context, "VALUES (", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); - get_rule_expr((Node *) strippedexprs, context, false); - appendStringInfoChar(buf, ')'); - } - else - { - /* No expressions, so it must be DEFAULT VALUES */ - appendStringInfoString(buf, "DEFAULT VALUES"); - } - - /* Add ON CONFLICT if present */ - if (query->onConflict) - { - OnConflictExpr *confl = query->onConflict; - - appendStringInfoString(buf, " ON CONFLICT"); - - if (confl->arbiterElems) - { - /* Add the single-VALUES expression list */ - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) confl->arbiterElems, context, false); - appendStringInfoChar(buf, ')'); - - /* Add a WHERE clause (for partial indexes) if given */ - if (confl->arbiterWhere != NULL) - { - bool save_varprefix; - - /* - * Force non-prefixing of Vars, since parser assumes that they - * belong to target relation. WHERE clause does not use - * InferenceElem, so this is separately required. 
- */ - save_varprefix = context->varprefix; - context->varprefix = false; - - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(confl->arbiterWhere, context, false); - - context->varprefix = save_varprefix; - } - } - else if (OidIsValid(confl->constraint)) - { - char *constraint = get_constraint_name(confl->constraint); - int64 shardId = context->shardid; - - if (shardId > 0) - { - AppendShardIdToName(&constraint, shardId); - } - - if (!constraint) - elog(ERROR, "cache lookup failed for constraint %u", - confl->constraint); - appendStringInfo(buf, " ON CONSTRAINT %s", - quote_identifier(constraint)); - } - - if (confl->action == ONCONFLICT_NOTHING) - { - appendStringInfoString(buf, " DO NOTHING"); - } - else - { - appendStringInfoString(buf, " DO UPDATE SET "); - /* Deparse targetlist */ - get_update_query_targetlist_def(query, confl->onConflictSet, - context, rte); - - /* Add a WHERE clause if given */ - if (confl->onConflictWhere != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(confl->onConflictWhere, context, false); - } - } - } - - /* Add RETURNING if present */ - if (query->returningList) - { - appendContextKeyword(context, " RETURNING", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_target_list(query->returningList, context, NULL); - } -} - - -/* ---------- - * get_update_query_def - Parse back an UPDATE parsetree - * ---------- - */ -static void -get_update_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *rte; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * Start the query with UPDATE relname SET - */ - rte = rt_fetch(query->resultRelation, query->rtable); - - if (PRETTY_INDENT(context)) - { - appendStringInfoChar(buf, ' '); - context->indentLevel += PRETTYINDENT_STD; - } - - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == 
CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "UPDATE %s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, fragmentTableName)); - - if(rte->eref != NULL) - appendStringInfo(buf, " %s", - quote_identifier(rte->eref->aliasname)); - } - else - { - appendStringInfo(buf, "UPDATE %s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - - if (rte->alias != NULL) - appendStringInfo(buf, " %s", - quote_identifier(rte->alias->aliasname)); - } - - appendStringInfoString(buf, " SET "); - - /* Deparse targetlist */ - get_update_query_targetlist_def(query, query->targetList, context, rte); - - /* Add the FROM clause if needed */ - get_from_clause(query, " FROM ", context); - - /* Add a WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add RETURNING if present */ - if (query->returningList) - { - appendContextKeyword(context, " RETURNING", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_target_list(query->returningList, context, NULL); - } -} - - -/* ---------- - * get_update_query_targetlist_def - Parse back an UPDATE targetlist - * ---------- - */ -static void -get_update_query_targetlist_def(Query *query, List *targetList, - deparse_context *context, RangeTblEntry *rte) -{ - StringInfo buf = context->buf; - ListCell *l; - ListCell *next_ma_cell; - int remaining_ma_columns; - const char *sep; - SubLink *cur_ma_sublink; - List *ma_sublinks; - - /* - * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks - * into a list. We expect them to appear, in ID order, in resjunk tlist - * entries. 
- */ - ma_sublinks = NIL; - if (query->hasSubLinks) /* else there can't be any */ - { - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - - if (tle->resjunk && IsA(tle->expr, SubLink)) - { - SubLink *sl = (SubLink *) tle->expr; - - if (sl->subLinkType == MULTIEXPR_SUBLINK) - { - ma_sublinks = lappend(ma_sublinks, sl); - Assert(sl->subLinkId == list_length(ma_sublinks)); - } - } - } - } - next_ma_cell = list_head(ma_sublinks); - cur_ma_sublink = NULL; - remaining_ma_columns = 0; - - /* Add the comma separated list of 'attname = value' */ - sep = ""; - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - Node *expr; - - if (tle->resjunk) - continue; /* ignore junk entries */ - - /* Emit separator (OK whether we're in multiassignment or not) */ - appendStringInfoString(buf, sep); - sep = ", "; - - /* - * Check to see if we're starting a multiassignment group: if so, - * output a left paren. - */ - if (next_ma_cell != NULL && cur_ma_sublink == NULL) - { - /* - * We must dig down into the expr to see if it's a PARAM_MULTIEXPR - * Param. That could be buried under FieldStores and ArrayRefs - * and CoerceToDomains (cf processIndirection()), and underneath - * those there could be an implicit type coercion. Because we - * would ignore implicit type coercions anyway, we don't need to - * be as careful as processIndirection() is about descending past - * implicit CoerceToDomains. 
- */ - expr = (Node *) tle->expr; - while (expr) - { - if (IsA(expr, FieldStore)) - { - FieldStore *fstore = (FieldStore *) expr; - - expr = (Node *) linitial(fstore->newvals); - } - else if (IsA(expr, ArrayRef)) - { - ArrayRef *aref = (ArrayRef *) expr; - - if (aref->refassgnexpr == NULL) - break; - expr = (Node *) aref->refassgnexpr; - } - else if (IsA(expr, CoerceToDomain)) - { - CoerceToDomain *cdomain = (CoerceToDomain *) expr; - - if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) - break; - expr = (Node *) cdomain->arg; - } - else - break; - } - expr = strip_implicit_coercions(expr); - - if (expr && IsA(expr, Param) && - ((Param *) expr)->paramkind == PARAM_MULTIEXPR) - { - cur_ma_sublink = (SubLink *) lfirst(next_ma_cell); - next_ma_cell = lnext(next_ma_cell); - remaining_ma_columns = count_nonjunk_tlist_entries( - ((Query *) cur_ma_sublink->subselect)->targetList); - Assert(((Param *) expr)->paramid == - ((cur_ma_sublink->subLinkId << 16) | 1)); - appendStringInfoChar(buf, '('); - } - } - - /* - * Put out name of target column; look in the catalogs, not at - * tle->resname, since resname will fail to track RENAME. - */ - appendStringInfoString(buf, - quote_identifier(get_relid_attribute_name(rte->relid, - tle->resno))); - - /* - * Print any indirection needed (subfields or subscripts), and strip - * off the top-level nodes representing the indirection assignments. - */ - expr = processIndirection((Node *) tle->expr, context); - - /* - * If we're in a multiassignment, skip printing anything more, unless - * this is the last column; in which case, what we print should be the - * sublink, not the Param. 
- */ - if (cur_ma_sublink != NULL) - { - if (--remaining_ma_columns > 0) - continue; /* not the last column of multiassignment */ - appendStringInfoChar(buf, ')'); - expr = (Node *) cur_ma_sublink; - cur_ma_sublink = NULL; - } - - appendStringInfoString(buf, " = "); - - get_rule_expr(expr, context, false); - } -} - - -/* ---------- - * get_delete_query_def - Parse back a DELETE parsetree - * ---------- - */ -static void -get_delete_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *rte; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * Start the query with DELETE FROM relname - */ - rte = rt_fetch(query->resultRelation, query->rtable); - - if (PRETTY_INDENT(context)) - { - appendStringInfoChar(buf, ' '); - context->indentLevel += PRETTYINDENT_STD; - } - - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "DELETE FROM %s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, fragmentTableName)); - - if(rte->eref != NULL) - appendStringInfo(buf, " %s", - quote_identifier(rte->eref->aliasname)); - } - else - { - appendStringInfo(buf, "DELETE FROM %s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - - if (rte->alias != NULL) - appendStringInfo(buf, " %s", - quote_identifier(rte->alias->aliasname)); - } - - /* Add the USING clause if given */ - get_from_clause(query, " USING ", context); - - /* Add a WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add 
RETURNING if present */ - if (query->returningList) - { - appendContextKeyword(context, " RETURNING", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_target_list(query->returningList, context, NULL); - } -} - - -/* ---------- - * get_utility_query_def - Parse back a UTILITY parsetree - * ---------- - */ -static void -get_utility_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - - if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt)) - { - NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt; - - appendContextKeyword(context, "", - 0, PRETTYINDENT_STD, 1); - appendStringInfo(buf, "NOTIFY %s", - quote_identifier(stmt->conditionname)); - if (stmt->payload) - { - appendStringInfoString(buf, ", "); - simple_quote_literal(buf, stmt->payload); - } - } - else if (query->utilityStmt && IsA(query->utilityStmt, TruncateStmt)) - { - TruncateStmt *stmt = (TruncateStmt *) query->utilityStmt; - List *relationList = stmt->relations; - ListCell *relationCell = NULL; - - appendContextKeyword(context, "", - 0, PRETTYINDENT_STD, 1); - - appendStringInfo(buf, "TRUNCATE TABLE"); - - foreach(relationCell, relationList) - { - RangeVar *relationVar = (RangeVar *) lfirst(relationCell); - Oid relationId = RangeVarGetRelid(relationVar, NoLock, false); - char *relationName = generate_relation_or_shard_name(relationId, - context->distrelid, - context->shardid, NIL); - appendStringInfo(buf, " %s", relationName); - - if (lnext(relationCell) != NULL) - { - appendStringInfo(buf, ","); - } - } - - if (stmt->restart_seqs) - { - appendStringInfo(buf, " RESTART IDENTITY"); - } - - if (stmt->behavior == DROP_CASCADE) - { - appendStringInfo(buf, " CASCADE"); - } - } - else - { - /* Currently only NOTIFY utility commands can appear in rules */ - elog(ERROR, "unexpected utility statement type"); - } -} - -/* - * Display a Var appropriately. 
- * - * In some cases (currently only when recursing into an unnamed join) - * the Var's varlevelsup has to be interpreted with respect to a context - * above the current one; levelsup indicates the offset. - * - * If istoplevel is TRUE, the Var is at the top level of a SELECT's - * targetlist, which means we need special treatment of whole-row Vars. - * Instead of the normal "tab.*", we'll print "tab.*::typename", which is a - * dirty hack to prevent "tab.*" from being expanded into multiple columns. - * (The parser will strip the useless coercion, so no inefficiency is added in - * dump and reload.) We used to print just "tab" in such cases, but that is - * ambiguous and will yield the wrong result if "tab" is also a plain column - * name in the query. - * - * Returns the attname of the Var, or NULL if the Var has no attname (because - * it is a whole-row Var or a subplan output reference). - */ -static char * -get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *rte; - AttrNumber attnum; - int netlevelsup; - deparse_namespace *dpns; - deparse_columns *colinfo; - char *refname; - char *attname; - - /* Find appropriate nesting depth */ - netlevelsup = var->varlevelsup + levelsup; - if (netlevelsup >= list_length(context->namespaces)) - elog(ERROR, "bogus varlevelsup: %d offset %d", - var->varlevelsup, levelsup); - dpns = (deparse_namespace *) list_nth(context->namespaces, - netlevelsup); - - /* - * Try to find the relevant RTE in this rtable. In a plan tree, it's - * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig - * down into the subplans, or INDEX_VAR, which is resolved similarly. Also - * find the aliases previously assigned for this RTE. 
- */ - if (var->varno >= 1 && var->varno <= list_length(dpns->rtable)) - { - rte = rt_fetch(var->varno, dpns->rtable); - refname = (char *) list_nth(dpns->rtable_names, var->varno - 1); - colinfo = deparse_columns_fetch(var->varno, dpns); - attnum = var->varattno; - } - else - { - resolve_special_varno((Node *) var, context, NULL, - get_special_variable); - return NULL; - } - - /* - * The planner will sometimes emit Vars referencing resjunk elements of a - * subquery's target list (this is currently only possible if it chooses - * to generate a "physical tlist" for a SubqueryScan or CteScan node). - * Although we prefer to print subquery-referencing Vars using the - * subquery's alias, that's not possible for resjunk items since they have - * no alias. So in that case, drill down to the subplan and print the - * contents of the referenced tlist item. This works because in a plan - * tree, such Vars can only occur in a SubqueryScan or CteScan node, and - * we'll have set dpns->inner_planstate to reference the child plan node. - */ - if ((rte->rtekind == RTE_SUBQUERY || rte->rtekind == RTE_CTE) && - attnum > list_length(rte->eref->colnames) && - dpns->inner_planstate) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); - if (!tle) - elog(ERROR, "invalid attnum %d for relation \"%s\"", - var->varattno, rte->eref->aliasname); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - - /* - * Force parentheses because our caller probably assumed a Var is a - * simple expression. - */ - if (!IsA(tle->expr, Var)) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) tle->expr, context, true); - if (!IsA(tle->expr, Var)) - appendStringInfoChar(buf, ')'); - - pop_child_plan(dpns, &save_dpns); - return NULL; - } - - /* - * If it's an unnamed join, look at the expansion of the alias variable. 
- * If it's a simple reference to one of the input vars, then recursively - * print the name of that var instead. When it's not a simple reference, - * we have to just print the unqualified join column name. (This can only - * happen with "dangerous" merged columns in a JOIN USING; we took pains - * previously to make the unqualified column name unique in such cases.) - * - * This wouldn't work in decompiling plan trees, because we don't store - * joinaliasvars lists after planning; but a plan tree should never - * contain a join alias variable. - */ - if (rte->rtekind == RTE_JOIN && rte->alias == NULL) - { - if (rte->joinaliasvars == NIL) - elog(ERROR, "cannot decompile join alias var in plan tree"); - if (attnum > 0) - { - Var *aliasvar; - - aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1); - /* we intentionally don't strip implicit coercions here */ - if (aliasvar && IsA(aliasvar, Var)) - { - return get_variable(aliasvar, var->varlevelsup + levelsup, - istoplevel, context); - } - } - - /* - * Unnamed join has no refname. (Note: since it's unnamed, there is - * no way the user could have referenced it to create a whole-row Var - * for it. So we don't have to cover that case below.) - */ - Assert(refname == NULL); - } - - if (attnum == InvalidAttrNumber) - attname = NULL; - else if (attnum > 0) - { - /* Get column name to use from the colinfo struct */ - if (attnum > colinfo->num_cols) - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); - attname = colinfo->colnames[attnum - 1]; - if (attname == NULL) /* dropped column? 
*/ - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); - } - else if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - /* System column on a Citus shard */ - attname = get_relid_attribute_name(rte->relid, attnum); - } - else - { - /* System column - name is fixed, get it from the catalog */ - attname = get_rte_attribute_name(rte, attnum); - } - - if (refname && (context->varprefix || attname == NULL)) - { - appendStringInfoString(buf, quote_identifier(refname)); - appendStringInfoChar(buf, '.'); - } - if (attname) - appendStringInfoString(buf, quote_identifier(attname)); - else - { - appendStringInfoChar(buf, '*'); - if (istoplevel) - appendStringInfo(buf, "::%s", - format_type_with_typemod(var->vartype, - var->vartypmod)); - } - - return attname; -} - -/* - * Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This - * routine is actually a callback for get_special_varno, which handles finding - * the correct TargetEntry. We get the expression contained in that - * TargetEntry and just need to deparse it, a job we can throw back on - * get_rule_expr. - */ -static void -get_special_variable(Node *node, deparse_context *context, void *private) -{ - StringInfo buf = context->buf; - - /* - * Force parentheses because our caller probably assumed a Var is a simple - * expression. - */ - if (!IsA(node, Var)) - appendStringInfoChar(buf, '('); - get_rule_expr(node, context, true); - if (!IsA(node, Var)) - appendStringInfoChar(buf, ')'); -} - -/* - * Chase through plan references to special varnos (OUTER_VAR, INNER_VAR, - * INDEX_VAR) until we find a real Var or some kind of non-Var node; then, - * invoke the callback provided. - */ -static void -resolve_special_varno(Node *node, deparse_context *context, void *private, - void (*callback) (Node *, deparse_context *, void *)) -{ - Var *var; - deparse_namespace *dpns; - - /* If it's not a Var, invoke the callback. 
*/ - if (!IsA(node, Var)) - { - callback(node, context, private); - return; - } - - /* Find appropriate nesting depth */ - var = (Var *) node; - dpns = (deparse_namespace *) list_nth(context->namespaces, - var->varlevelsup); - - /* - * It's a special RTE, so recurse. - */ - if (var->varno == OUTER_VAR && dpns->outer_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); - - push_child_plan(dpns, dpns->outer_planstate, &save_dpns); - resolve_special_varno((Node *) tle->expr, context, private, callback); - pop_child_plan(dpns, &save_dpns); - return; - } - else if (var->varno == INNER_VAR && dpns->inner_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); - - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - resolve_special_varno((Node *) tle->expr, context, private, callback); - pop_child_plan(dpns, &save_dpns); - return; - } - else if (var->varno == INDEX_VAR && dpns->index_tlist) - { - TargetEntry *tle; - - tle = get_tle_by_resno(dpns->index_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); - - resolve_special_varno((Node *) tle->expr, context, private, callback); - return; - } - else if (var->varno < 1 || var->varno > list_length(dpns->rtable)) - elog(ERROR, "bogus varno: %d", var->varno); - - /* Not special. Just invoke the callback. */ - callback(node, context, private); -} - -/* - * Get the name of a field of an expression of composite type. The - * expression is usually a Var, but we handle other cases too. - * - * levelsup is an extra offset to interpret the Var's varlevelsup correctly. 
- * - * This is fairly straightforward when the expression has a named composite - * type; we need only look up the type in the catalogs. However, the type - * could also be RECORD. Since no actual table or view column is allowed to - * have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE - * or to a subquery output. We drill down to find the ultimate defining - * expression and attempt to infer the field name from it. We ereport if we - * can't determine the name. - * - * Similarly, a PARAM of type RECORD has to refer to some expression of - * a determinable composite type. - */ -static const char * -get_name_for_var_field(Var *var, int fieldno, - int levelsup, deparse_context *context) -{ - RangeTblEntry *rte; - AttrNumber attnum; - int netlevelsup; - deparse_namespace *dpns; - TupleDesc tupleDesc; - Node *expr; - - /* - * If it's a RowExpr that was expanded from a whole-row Var, use the - * column names attached to it. - */ - if (IsA(var, RowExpr)) - { - RowExpr *r = (RowExpr *) var; - - if (fieldno > 0 && fieldno <= list_length(r->colnames)) - return strVal(list_nth(r->colnames, fieldno - 1)); - } - - /* - * If it's a Param of type RECORD, try to find what the Param refers to. - */ - if (IsA(var, Param)) - { - Param *param = (Param *) var; - ListCell *ancestor_cell; - - expr = find_param_referent(param, context, &dpns, &ancestor_cell); - if (expr) - { - /* Found a match, so recurse to decipher the field name */ - deparse_namespace save_dpns; - const char *result; - - push_ancestor_plan(dpns, ancestor_cell, &save_dpns); - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - pop_ancestor_plan(dpns, &save_dpns); - return result; - } - } - - /* - * If it's a Var of type RECORD, we have to find what the Var refers to; - * if not, we can use get_expr_result_type. If that fails, we try - * lookup_rowtype_tupdesc, which will probably fail too, but will ereport - * an acceptable message. 
- */ - if (!IsA(var, Var) || - var->vartype != RECORDOID) - { - if (get_expr_result_type((Node *) var, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) - tupleDesc = lookup_rowtype_tupdesc_copy(exprType((Node *) var), - exprTypmod((Node *) var)); - Assert(tupleDesc); - /* Got the tupdesc, so we can extract the field name */ - Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); - return NameStr(tupleDesc->attrs[fieldno - 1]->attname); - } - - /* Find appropriate nesting depth */ - netlevelsup = var->varlevelsup + levelsup; - if (netlevelsup >= list_length(context->namespaces)) - elog(ERROR, "bogus varlevelsup: %d offset %d", - var->varlevelsup, levelsup); - dpns = (deparse_namespace *) list_nth(context->namespaces, - netlevelsup); - - /* - * Try to find the relevant RTE in this rtable. In a plan tree, it's - * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig - * down into the subplans, or INDEX_VAR, which is resolved similarly. - */ - if (var->varno >= 1 && var->varno <= list_length(dpns->rtable)) - { - rte = rt_fetch(var->varno, dpns->rtable); - attnum = var->varattno; - } - else if (var->varno == OUTER_VAR && dpns->outer_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->outer_planstate, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - else if (var->varno == INNER_VAR && dpns->inner_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - - result = 
get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - else if (var->varno == INDEX_VAR && dpns->index_tlist) - { - TargetEntry *tle; - const char *result; - - tle = get_tle_by_resno(dpns->index_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); - - Assert(netlevelsup == 0); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - return result; - } - else - { - elog(ERROR, "bogus varno: %d", var->varno); - return NULL; /* keep compiler quiet */ - } - - if (attnum == InvalidAttrNumber) - { - /* Var is whole-row reference to RTE, so select the right field */ - return get_rte_attribute_name(rte, fieldno); - } - - /* - * This part has essentially the same logic as the parser's - * expandRecordVariable() function, but we are dealing with a different - * representation of the input context, and we only need one field name - * not a TupleDesc. Also, we need special cases for finding subquery and - * CTE subplans when deparsing Plan trees. - */ - expr = (Node *) var; /* default if we can't drill down */ - - switch (rte->rtekind) - { - case RTE_RELATION: - case RTE_VALUES: - - /* - * This case should not occur: a column of a table or values list - * shouldn't have type RECORD. Fall through and fail (most - * likely) at the bottom. - */ - break; - case RTE_SUBQUERY: - /* Subselect-in-FROM: examine sub-select's output expr */ - { - if (rte->subquery) - { - TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList, - attnum); - - if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", - rte->eref->aliasname, attnum); - expr = (Node *) ste->expr; - if (IsA(expr, Var)) - { - /* - * Recurse into the sub-select to see what its Var - * refers to. We have to build an additional level of - * namespace to keep in step with varlevelsup in the - * subselect. 
- */ - deparse_namespace mydpns; - const char *result; - - set_deparse_for_query(&mydpns, rte->subquery, - context->namespaces); - - context->namespaces = lcons(&mydpns, - context->namespaces); - - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - - context->namespaces = - list_delete_first(context->namespaces); - - return result; - } - /* else fall through to inspect the expression */ - } - else - { - /* - * We're deparsing a Plan tree so we don't have complete - * RTE entries (in particular, rte->subquery is NULL). But - * the only place we'd see a Var directly referencing a - * SUBQUERY RTE is in a SubqueryScan plan node, and we can - * look into the child plan's tlist instead. - */ - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - if (!dpns->inner_planstate) - elog(ERROR, "failed to find plan for subquery %s", - rte->eref->aliasname); - tle = get_tle_by_resno(dpns->inner_tlist, attnum); - if (!tle) - elog(ERROR, "bogus varattno for subquery var: %d", - attnum); - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - } - break; - case RTE_JOIN: - /* Join RTE --- recursively inspect the alias variable */ - if (rte->joinaliasvars == NIL) - elog(ERROR, "cannot decompile join alias var in plan tree"); - Assert(attnum > 0 && attnum <= list_length(rte->joinaliasvars)); - expr = (Node *) list_nth(rte->joinaliasvars, attnum - 1); - Assert(expr != NULL); - /* we intentionally don't strip implicit coercions here */ - if (IsA(expr, Var)) - return get_name_for_var_field((Var *) expr, fieldno, - var->varlevelsup + levelsup, - context); - /* else fall through to inspect the expression */ - break; - case RTE_FUNCTION: - - /* - * We couldn't get here unless a function is declared with one of - * its result columns as RECORD, which is not allowed. 
- */ - break; - case RTE_CTE: - /* CTE reference: examine subquery's output expr */ - { - CommonTableExpr *cte = NULL; - Index ctelevelsup; - ListCell *lc; - - /* - * Try to find the referenced CTE using the namespace stack. - */ - ctelevelsup = rte->ctelevelsup + netlevelsup; - if (ctelevelsup >= list_length(context->namespaces)) - lc = NULL; - else - { - deparse_namespace *ctedpns; - - ctedpns = (deparse_namespace *) - list_nth(context->namespaces, ctelevelsup); - foreach(lc, ctedpns->ctes) - { - cte = (CommonTableExpr *) lfirst(lc); - if (strcmp(cte->ctename, rte->ctename) == 0) - break; - } - } - if (lc != NULL) - { - Query *ctequery = (Query *) cte->ctequery; - TargetEntry *ste = get_tle_by_resno(GetCTETargetList(cte), - attnum); - - if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", - rte->eref->aliasname, attnum); - expr = (Node *) ste->expr; - if (IsA(expr, Var)) - { - /* - * Recurse into the CTE to see what its Var refers to. - * We have to build an additional level of namespace - * to keep in step with varlevelsup in the CTE. - * Furthermore it could be an outer CTE, so we may - * have to delete some levels of namespace. - */ - List *save_nslist = context->namespaces; - List *new_nslist; - deparse_namespace mydpns; - const char *result; - - set_deparse_for_query(&mydpns, ctequery, - context->namespaces); - - new_nslist = list_copy_tail(context->namespaces, - ctelevelsup); - context->namespaces = lcons(&mydpns, new_nslist); - - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - - context->namespaces = save_nslist; - - return result; - } - /* else fall through to inspect the expression */ - } - else - { - /* - * We're deparsing a Plan tree so we don't have a CTE - * list. But the only place we'd see a Var directly - * referencing a CTE RTE is in a CteScan plan node, and we - * can look into the subplan's tlist instead. 
- */ - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - if (!dpns->inner_planstate) - elog(ERROR, "failed to find plan for CTE %s", - rte->eref->aliasname); - tle = get_tle_by_resno(dpns->inner_tlist, attnum); - if (!tle) - elog(ERROR, "bogus varattno for subquery var: %d", - attnum); - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - } - break; - } - - /* - * We now have an expression we can't expand any more, so see if - * get_expr_result_type() can do anything with it. If not, pass to - * lookup_rowtype_tupdesc() which will probably fail, but will give an - * appropriate error message while failing. - */ - if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) - tupleDesc = lookup_rowtype_tupdesc_copy(exprType(expr), - exprTypmod(expr)); - Assert(tupleDesc); - /* Got the tupdesc, so we can extract the field name */ - Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); - return NameStr(tupleDesc->attrs[fieldno - 1]->attname); -} - -/* - * Try to find the referenced expression for a PARAM_EXEC Param that might - * reference a parameter supplied by an upper NestLoop or SubPlan plan node. - * - * If successful, return the expression and set *dpns_p and *ancestor_cell_p - * appropriately for calling push_ancestor_plan(). If no referent can be - * found, return NULL. - */ -static Node * -find_param_referent(Param *param, deparse_context *context, - deparse_namespace **dpns_p, ListCell **ancestor_cell_p) -{ - /* Initialize output parameters to prevent compiler warnings */ - *dpns_p = NULL; - *ancestor_cell_p = NULL; - - /* - * If it's a PARAM_EXEC parameter, look for a matching NestLoopParam or - * SubPlan argument. This will necessarily be in some ancestor of the - * current expression's PlanState. 
- */ - if (param->paramkind == PARAM_EXEC) - { - deparse_namespace *dpns; - PlanState *child_ps; - bool in_same_plan_level; - ListCell *lc; - - dpns = (deparse_namespace *) linitial(context->namespaces); - child_ps = dpns->planstate; - in_same_plan_level = true; - - foreach(lc, dpns->ancestors) - { - PlanState *ps = (PlanState *) lfirst(lc); - ListCell *lc2; - - /* - * NestLoops transmit params to their inner child only; also, once - * we've crawled up out of a subplan, this couldn't possibly be - * the right match. - */ - if (IsA(ps, NestLoopState) && - child_ps == innerPlanState(ps) && - in_same_plan_level) - { - NestLoop *nl = (NestLoop *) ps->plan; - - foreach(lc2, nl->nestParams) - { - NestLoopParam *nlp = (NestLoopParam *) lfirst(lc2); - - if (nlp->paramno == param->paramid) - { - /* Found a match, so return it */ - *dpns_p = dpns; - *ancestor_cell_p = lc; - return (Node *) nlp->paramval; - } - } - } - - /* - * Check to see if we're crawling up from a subplan. - */ - foreach(lc2, ps->subPlan) - { - SubPlanState *sstate = (SubPlanState *) lfirst(lc2); - SubPlan *subplan = (SubPlan *) sstate->xprstate.expr; - ListCell *lc3; - ListCell *lc4; - - if (child_ps != sstate->planstate) - continue; - - /* Matched subplan, so check its arguments */ - forboth(lc3, subplan->parParam, lc4, subplan->args) - { - int paramid = lfirst_int(lc3); - Node *arg = (Node *) lfirst(lc4); - - if (paramid == param->paramid) - { - /* Found a match, so return it */ - *dpns_p = dpns; - *ancestor_cell_p = lc; - return arg; - } - } - - /* Keep looking, but we are emerging from a subplan. */ - in_same_plan_level = false; - break; - } - - /* - * Likewise check to see if we're emerging from an initplan. - * Initplans never have any parParams, so no need to search that - * list, but we need to know if we should reset - * in_same_plan_level. 
- */ - foreach(lc2, ps->initPlan) - { - SubPlanState *sstate = (SubPlanState *) lfirst(lc2); - - if (child_ps != sstate->planstate) - continue; - - /* No parameters to be had here. */ - Assert(((SubPlan *) sstate->xprstate.expr)->parParam == NIL); - - /* Keep looking, but we are emerging from an initplan. */ - in_same_plan_level = false; - break; - } - - /* No luck, crawl up to next ancestor */ - child_ps = ps; - } - } - - /* No referent found */ - return NULL; -} - -/* - * Display a Param appropriately. - */ -static void -get_parameter(Param *param, deparse_context *context) -{ - Node *expr; - deparse_namespace *dpns; - ListCell *ancestor_cell; - - /* - * If it's a PARAM_EXEC parameter, try to locate the expression from which - * the parameter was computed. Note that failing to find a referent isn't - * an error, since the Param might well be a subplan output rather than an - * input. - */ - expr = find_param_referent(param, context, &dpns, &ancestor_cell); - if (expr) - { - /* Found a match, so print it */ - deparse_namespace save_dpns; - bool save_varprefix; - bool need_paren; - - /* Switch attention to the ancestor plan node */ - push_ancestor_plan(dpns, ancestor_cell, &save_dpns); - - /* - * Force prefixing of Vars, since they won't belong to the relation - * being scanned in the original plan node. - */ - save_varprefix = context->varprefix; - context->varprefix = true; - - /* - * A Param's expansion is typically a Var, Aggref, or upper-level - * Param, which wouldn't need extra parentheses. Otherwise, insert - * parens to ensure the expression looks atomic. 
- */ - need_paren = !(IsA(expr, Var) || - IsA(expr, Aggref) || - IsA(expr, Param)); - if (need_paren) - appendStringInfoChar(context->buf, '('); - - get_rule_expr(expr, context, false); - - if (need_paren) - appendStringInfoChar(context->buf, ')'); - - context->varprefix = save_varprefix; - - pop_ancestor_plan(dpns, &save_dpns); - - return; - } - - /* - * Not PARAM_EXEC, or couldn't find referent: just print $N. - */ - appendStringInfo(context->buf, "$%d", param->paramid); -} - -/* - * get_simple_binary_op_name - * - * helper function for isSimpleNode - * will return single char binary operator name, or NULL if it's not - */ -static const char * -get_simple_binary_op_name(OpExpr *expr) -{ - List *args = expr->args; - - if (list_length(args) == 2) - { - /* binary operator */ - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - const char *op; - - op = generate_operator_name(expr->opno, exprType(arg1), exprType(arg2)); - if (strlen(op) == 1) - return op; - } - return NULL; -} - - -/* - * isSimpleNode - check if given node is simple (doesn't need parenthesizing) - * - * true : simple in the context of parent node's type - * false : not simple - */ -static bool -isSimpleNode(Node *node, Node *parentNode, int prettyFlags) -{ - if (!node) - return false; - - switch (nodeTag(node)) - { - case T_Var: - case T_Const: - case T_Param: - case T_CoerceToDomainValue: - case T_SetToDefault: - case T_CurrentOfExpr: - /* single words: always simple */ - return true; - - case T_ArrayRef: - case T_ArrayExpr: - case T_RowExpr: - case T_CoalesceExpr: - case T_MinMaxExpr: - case T_XmlExpr: - case T_NullIfExpr: - case T_Aggref: - case T_WindowFunc: - case T_FuncExpr: - /* function-like: name(..) or name[..] */ - return true; - - /* CASE keywords act as parentheses */ - case T_CaseExpr: - return true; - - case T_FieldSelect: - - /* - * appears simple since . has top precedence, unless parent is - * T_FieldSelect itself! 
- */ - return (IsA(parentNode, FieldSelect) ? false : true); - - case T_FieldStore: - - /* - * treat like FieldSelect (probably doesn't matter) - */ - return (IsA(parentNode, FieldStore) ? false : true); - - case T_CoerceToDomain: - /* maybe simple, check args */ - return isSimpleNode((Node *) ((CoerceToDomain *) node)->arg, - node, prettyFlags); - case T_RelabelType: - return isSimpleNode((Node *) ((RelabelType *) node)->arg, - node, prettyFlags); - case T_CoerceViaIO: - return isSimpleNode((Node *) ((CoerceViaIO *) node)->arg, - node, prettyFlags); - case T_ArrayCoerceExpr: - return isSimpleNode((Node *) ((ArrayCoerceExpr *) node)->arg, - node, prettyFlags); - case T_ConvertRowtypeExpr: - return isSimpleNode((Node *) ((ConvertRowtypeExpr *) node)->arg, - node, prettyFlags); - - case T_OpExpr: - { - /* depends on parent node type; needs further checking */ - if (prettyFlags & PRETTYFLAG_PAREN && IsA(parentNode, OpExpr)) - { - const char *op; - const char *parentOp; - bool is_lopriop; - bool is_hipriop; - bool is_lopriparent; - bool is_hipriparent; - - op = get_simple_binary_op_name((OpExpr *) node); - if (!op) - return false; - - /* We know only the basic operators + - and * / % */ - is_lopriop = (strchr("+-", *op) != NULL); - is_hipriop = (strchr("*/%", *op) != NULL); - if (!(is_lopriop || is_hipriop)) - return false; - - parentOp = get_simple_binary_op_name((OpExpr *) parentNode); - if (!parentOp) - return false; - - is_lopriparent = (strchr("+-", *parentOp) != NULL); - is_hipriparent = (strchr("*/%", *parentOp) != NULL); - if (!(is_lopriparent || is_hipriparent)) - return false; - - if (is_hipriop && is_lopriparent) - return true; /* op binds tighter than parent */ - - if (is_lopriop && is_hipriparent) - return false; - - /* - * Operators are same priority --- can skip parens only if - * we have (a - b) - c, not a - (b - c). 
- */ - if (node == (Node *) linitial(((OpExpr *) parentNode)->args)) - return true; - - return false; - } - /* else do the same stuff as for T_SubLink et al. */ - } - - /* fallthrough */ - case T_SubLink: - case T_NullTest: - case T_BooleanTest: - case T_DistinctExpr: - switch (nodeTag(parentNode)) - { - case T_FuncExpr: - { - /* special handling for casts */ - CoercionForm type = ((FuncExpr *) parentNode)->funcformat; - - if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) - return false; - return true; /* own parentheses */ - } - case T_BoolExpr: /* lower precedence */ - case T_ArrayRef: /* other separators */ - case T_ArrayExpr: /* other separators */ - case T_RowExpr: /* other separators */ - case T_CoalesceExpr: /* own parentheses */ - case T_MinMaxExpr: /* own parentheses */ - case T_XmlExpr: /* own parentheses */ - case T_NullIfExpr: /* other separators */ - case T_Aggref: /* own parentheses */ - case T_WindowFunc: /* own parentheses */ - case T_CaseExpr: /* other separators */ - return true; - default: - return false; - } - - case T_BoolExpr: - switch (nodeTag(parentNode)) - { - case T_BoolExpr: - if (prettyFlags & PRETTYFLAG_PAREN) - { - BoolExprType type; - BoolExprType parentType; - - type = ((BoolExpr *) node)->boolop; - parentType = ((BoolExpr *) parentNode)->boolop; - switch (type) - { - case NOT_EXPR: - case AND_EXPR: - if (parentType == AND_EXPR || parentType == OR_EXPR) - return true; - break; - case OR_EXPR: - if (parentType == OR_EXPR) - return true; - break; - } - } - return false; - case T_FuncExpr: - { - /* special handling for casts */ - CoercionForm type = ((FuncExpr *) parentNode)->funcformat; - - if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) - return false; - return true; /* own parentheses */ - } - case T_ArrayRef: /* other separators */ - case T_ArrayExpr: /* other separators */ - case T_RowExpr: /* other separators */ - case T_CoalesceExpr: /* own parentheses */ - case T_MinMaxExpr: /* own 
parentheses */ - case T_XmlExpr: /* own parentheses */ - case T_NullIfExpr: /* other separators */ - case T_Aggref: /* own parentheses */ - case T_WindowFunc: /* own parentheses */ - case T_CaseExpr: /* other separators */ - return true; - default: - return false; - } - - default: - break; - } - /* those we don't know: in dubio complexo */ - return false; -} - - -/* - * appendContextKeyword - append a keyword to buffer - * - * If prettyPrint is enabled, perform a line break, and adjust indentation. - * Otherwise, just append the keyword. - */ -static void -appendContextKeyword(deparse_context *context, const char *str, - int indentBefore, int indentAfter, int indentPlus) -{ - StringInfo buf = context->buf; - - if (PRETTY_INDENT(context)) - { - int indentAmount; - - context->indentLevel += indentBefore; - - /* remove any trailing spaces currently in the buffer ... */ - removeStringInfoSpaces(buf); - /* ... then add a newline and some spaces */ - appendStringInfoChar(buf, '\n'); - - if (context->indentLevel < PRETTYINDENT_LIMIT) - indentAmount = Max(context->indentLevel, 0) + indentPlus; - else - { - /* - * If we're indented more than PRETTYINDENT_LIMIT characters, try - * to conserve horizontal space by reducing the per-level - * indentation. For best results the scale factor here should - * divide all the indent amounts that get added to indentLevel - * (PRETTYINDENT_STD, etc). It's important that the indentation - * not grow unboundedly, else deeply-nested trees use O(N^2) - * whitespace; so we also wrap modulo PRETTYINDENT_LIMIT. 
- */ - indentAmount = PRETTYINDENT_LIMIT + - (context->indentLevel - PRETTYINDENT_LIMIT) / - (PRETTYINDENT_STD / 2); - indentAmount %= PRETTYINDENT_LIMIT; - /* scale/wrap logic affects indentLevel, but not indentPlus */ - indentAmount += indentPlus; - } - appendStringInfoSpaces(buf, indentAmount); - - appendStringInfoString(buf, str); - - context->indentLevel += indentAfter; - if (context->indentLevel < 0) - context->indentLevel = 0; - } - else - appendStringInfoString(buf, str); -} - -/* - * removeStringInfoSpaces - delete trailing spaces from a buffer. - * - * Possibly this should move to stringinfo.c at some point. - */ -static void -removeStringInfoSpaces(StringInfo str) -{ - while (str->len > 0 && str->data[str->len - 1] == ' ') - str->data[--(str->len)] = '\0'; -} - - -/* - * get_rule_expr_paren - deparse expr using get_rule_expr, - * embracing the string with parentheses if necessary for prettyPrint. - * - * Never embrace if prettyFlags=0, because it's done in the calling node. - * - * Any node that does *not* embrace its argument node by sql syntax (with - * parentheses, non-operator keywords like CASE/WHEN/ON, or comma etc) should - * use get_rule_expr_paren instead of get_rule_expr so parentheses can be - * added. - */ -static void -get_rule_expr_paren(Node *node, deparse_context *context, - bool showimplicit, Node *parentNode) -{ - bool need_paren; - - need_paren = PRETTY_PAREN(context) && - !isSimpleNode(node, parentNode, context->prettyFlags); - - if (need_paren) - appendStringInfoChar(context->buf, '('); - - get_rule_expr(node, context, showimplicit); - - if (need_paren) - appendStringInfoChar(context->buf, ')'); -} - - -/* ---------- - * get_rule_expr - Parse back an expression - * - * Note: showimplicit determines whether we display any implicit cast that - * is present at the top of the expression tree. It is a passed argument, - * not a field of the context struct, because we change the value as we - * recurse down into the expression. 
In general we suppress implicit casts - * when the result type is known with certainty (eg, the arguments of an - * OR must be boolean). We display implicit casts for arguments of functions - * and operators, since this is needed to be certain that the same function - * or operator will be chosen when the expression is re-parsed. - * ---------- - */ -static void -get_rule_expr(Node *node, deparse_context *context, - bool showimplicit) -{ - StringInfo buf = context->buf; - - if (node == NULL) - return; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - /* - * Each level of get_rule_expr must emit an indivisible term - * (parenthesized if necessary) to ensure result is reparsed into the same - * expression tree. The only exception is that when the input is a List, - * we emit the component items comma-separated with no surrounding - * decoration; this is convenient for most callers. - */ - switch (nodeTag(node)) - { - case T_Var: - (void) get_variable((Var *) node, 0, false, context); - break; - - case T_Const: - get_const_expr((Const *) node, context, 0); - break; - - case T_Param: - get_parameter((Param *) node, context); - break; - - case T_Aggref: - get_agg_expr((Aggref *) node, context, (Aggref *) node); - break; - - case T_GroupingFunc: - { - GroupingFunc *gexpr = (GroupingFunc *) node; - - appendStringInfoString(buf, "GROUPING("); - get_rule_expr((Node *) gexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_WindowFunc: - get_windowfunc_expr((WindowFunc *) node, context); - break; - - case T_ArrayRef: - { - ArrayRef *aref = (ArrayRef *) node; - bool need_parens; - - /* - * If the argument is a CaseTestExpr, we must be inside a - * FieldStore, ie, we are assigning to an element of an array - * within a composite column. 
Since we already punted on - * displaying the FieldStore's target information, just punt - * here too, and display only the assignment source - * expression. - */ - if (IsA(aref->refexpr, CaseTestExpr)) - { - Assert(aref->refassgnexpr); - get_rule_expr((Node *) aref->refassgnexpr, - context, showimplicit); - break; - } - - /* - * Parenthesize the argument unless it's a simple Var or a - * FieldSelect. (In particular, if it's another ArrayRef, we - * *must* parenthesize to avoid confusion.) - */ - need_parens = !IsA(aref->refexpr, Var) && - !IsA(aref->refexpr, FieldSelect); - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) aref->refexpr, context, showimplicit); - if (need_parens) - appendStringInfoChar(buf, ')'); - - /* - * If there's a refassgnexpr, we want to print the node in the - * format "array[subscripts] := refassgnexpr". This is not - * legal SQL, so decompilation of INSERT or UPDATE statements - * should always use processIndirection as part of the - * statement-level syntax. We should only see this when - * EXPLAIN tries to print the targetlist of a plan resulting - * from such a statement. - */ - if (aref->refassgnexpr) - { - Node *refassgnexpr; - - /* - * Use processIndirection to print this node's subscripts - * as well as any additional field selections or - * subscripting in immediate descendants. It returns the - * RHS expr that is actually being "assigned". 
- */ - refassgnexpr = processIndirection(node, context); - appendStringInfoString(buf, " := "); - get_rule_expr(refassgnexpr, context, showimplicit); - } - else - { - /* Just an ordinary array fetch, so print subscripts */ - printSubscripts(aref, context); - } - } - break; - - case T_FuncExpr: - get_func_expr((FuncExpr *) node, context, showimplicit); - break; - - case T_NamedArgExpr: - { - NamedArgExpr *na = (NamedArgExpr *) node; - - appendStringInfo(buf, "%s => ", quote_identifier(na->name)); - get_rule_expr((Node *) na->arg, context, showimplicit); - } - break; - - case T_OpExpr: - get_oper_expr((OpExpr *) node, context); - break; - - case T_DistinctExpr: - { - DistinctExpr *expr = (DistinctExpr *) node; - List *args = expr->args; - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg1, context, true, node); - appendStringInfoString(buf, " IS DISTINCT FROM "); - get_rule_expr_paren(arg2, context, true, node); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_NullIfExpr: - { - NullIfExpr *nullifexpr = (NullIfExpr *) node; - - appendStringInfoString(buf, "NULLIF("); - get_rule_expr((Node *) nullifexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_ScalarArrayOpExpr: - { - ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node; - List *args = expr->args; - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg1, context, true, node); - appendStringInfo(buf, " %s %s (", - generate_operator_name(expr->opno, - exprType(arg1), - get_base_element_type(exprType(arg2))), - expr->useOr ? "ANY" : "ALL"); - get_rule_expr_paren(arg2, context, true, node); - - /* - * There's inherent ambiguity in "x op ANY/ALL (y)" when y is - * a bare sub-SELECT. 
Since we're here, the sub-SELECT must - * be meant as a scalar sub-SELECT yielding an array value to - * be used in ScalarArrayOpExpr; but the grammar will - * preferentially interpret such a construct as an ANY/ALL - * SubLink. To prevent misparsing the output that way, insert - * a dummy coercion (which will be stripped by parse analysis, - * so no inefficiency is added in dump and reload). This is - * indeed most likely what the user wrote to get the construct - * accepted in the first place. - */ - if (IsA(arg2, SubLink) && - ((SubLink *) arg2)->subLinkType == EXPR_SUBLINK) - appendStringInfo(buf, "::%s", - format_type_with_typemod(exprType(arg2), - exprTypmod(arg2))); - appendStringInfoChar(buf, ')'); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_BoolExpr: - { - BoolExpr *expr = (BoolExpr *) node; - Node *first_arg = linitial(expr->args); - ListCell *arg = lnext(list_head(expr->args)); - - switch (expr->boolop) - { - case AND_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(first_arg, context, - false, node); - while (arg) - { - appendStringInfoString(buf, " AND "); - get_rule_expr_paren((Node *) lfirst(arg), context, - false, node); - arg = lnext(arg); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - case OR_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(first_arg, context, - false, node); - while (arg) - { - appendStringInfoString(buf, " OR "); - get_rule_expr_paren((Node *) lfirst(arg), context, - false, node); - arg = lnext(arg); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - case NOT_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - appendStringInfoString(buf, "NOT "); - get_rule_expr_paren(first_arg, context, - false, node); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - default: - elog(ERROR, "unrecognized 
boolop: %d", - (int) expr->boolop); - } - } - break; - - case T_SubLink: - get_sublink_expr((SubLink *) node, context); - break; - - case T_SubPlan: - { - SubPlan *subplan = (SubPlan *) node; - - /* - * We cannot see an already-planned subplan in rule deparsing, - * only while EXPLAINing a query plan. We don't try to - * reconstruct the original SQL, just reference the subplan - * that appears elsewhere in EXPLAIN's result. - */ - if (subplan->useHashTable) - appendStringInfo(buf, "(hashed %s)", subplan->plan_name); - else - appendStringInfo(buf, "(%s)", subplan->plan_name); - } - break; - - case T_AlternativeSubPlan: - { - AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; - ListCell *lc; - - /* As above, this can only happen during EXPLAIN */ - appendStringInfoString(buf, "(alternatives: "); - foreach(lc, asplan->subplans) - { - SubPlan *splan = (SubPlan *) lfirst(lc); - - Assert(IsA(splan, SubPlan)); - if (splan->useHashTable) - appendStringInfo(buf, "hashed %s", splan->plan_name); - else - appendStringInfoString(buf, splan->plan_name); - if (lnext(lc)) - appendStringInfoString(buf, " or "); - } - appendStringInfoChar(buf, ')'); - } - break; - - case T_FieldSelect: - { - FieldSelect *fselect = (FieldSelect *) node; - Node *arg = (Node *) fselect->arg; - int fno = fselect->fieldnum; - const char *fieldname; - bool need_parens; - - /* - * Parenthesize the argument unless it's an ArrayRef or - * another FieldSelect. Note in particular that it would be - * WRONG to not parenthesize a Var argument; simplicity is not - * the issue here, having the right number of names is. - */ - need_parens = !IsA(arg, ArrayRef) &&!IsA(arg, FieldSelect); - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr(arg, context, true); - if (need_parens) - appendStringInfoChar(buf, ')'); - - /* - * Get and print the field name. 
- */ - fieldname = get_name_for_var_field((Var *) arg, fno, - 0, context); - appendStringInfo(buf, ".%s", quote_identifier(fieldname)); - } - break; - - case T_FieldStore: - { - FieldStore *fstore = (FieldStore *) node; - bool need_parens; - - /* - * There is no good way to represent a FieldStore as real SQL, - * so decompilation of INSERT or UPDATE statements should - * always use processIndirection as part of the - * statement-level syntax. We should only get here when - * EXPLAIN tries to print the targetlist of a plan resulting - * from such a statement. The plan case is even harder than - * ordinary rules would be, because the planner tries to - * collapse multiple assignments to the same field or subfield - * into one FieldStore; so we can see a list of target fields - * not just one, and the arguments could be FieldStores - * themselves. We don't bother to try to print the target - * field names; we just print the source arguments, with a - * ROW() around them if there's more than one. This isn't - * terribly complete, but it's probably good enough for - * EXPLAIN's purposes; especially since anything more would be - * either hopelessly confusing or an even poorer - * representation of what the plan is actually doing. 
- */ - need_parens = (list_length(fstore->newvals) != 1); - if (need_parens) - appendStringInfoString(buf, "ROW("); - get_rule_expr((Node *) fstore->newvals, context, showimplicit); - if (need_parens) - appendStringInfoChar(buf, ')'); - } - break; - - case T_RelabelType: - { - RelabelType *relabel = (RelabelType *) node; - Node *arg = (Node *) relabel->arg; - - if (relabel->relabelformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - relabel->resulttype, - relabel->resulttypmod, - node); - } - } - break; - - case T_CoerceViaIO: - { - CoerceViaIO *iocoerce = (CoerceViaIO *) node; - Node *arg = (Node *) iocoerce->arg; - - if (iocoerce->coerceformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - iocoerce->resulttype, - -1, - node); - } - } - break; - - case T_ArrayCoerceExpr: - { - ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; - Node *arg = (Node *) acoerce->arg; - - if (acoerce->coerceformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - acoerce->resulttype, - acoerce->resulttypmod, - node); - } - } - break; - - case T_ConvertRowtypeExpr: - { - ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node; - Node *arg = (Node *) convert->arg; - - if (convert->convertformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr_paren(arg, context, false, node); - } - else - { - get_coercion_expr(arg, context, - convert->resulttype, -1, - node); - } - } - break; - - case T_CollateExpr: - { - CollateExpr *collate = (CollateExpr *) node; - Node *arg = (Node *) collate->arg; - - if (!PRETTY_PAREN(context)) - 
appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg, context, showimplicit, node); - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(collate->collOid)); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_CaseExpr: - { - CaseExpr *caseexpr = (CaseExpr *) node; - ListCell *temp; - - appendContextKeyword(context, "CASE", - 0, PRETTYINDENT_VAR, 0); - if (caseexpr->arg) - { - appendStringInfoChar(buf, ' '); - get_rule_expr((Node *) caseexpr->arg, context, true); - } - foreach(temp, caseexpr->args) - { - CaseWhen *when = (CaseWhen *) lfirst(temp); - Node *w = (Node *) when->expr; - - if (caseexpr->arg) - { - /* - * The parser should have produced WHEN clauses of the - * form "CaseTestExpr = RHS", possibly with an - * implicit coercion inserted above the CaseTestExpr. - * For accurate decompilation of rules it's essential - * that we show just the RHS. However in an - * expression that's been through the optimizer, the - * WHEN clause could be almost anything (since the - * equality operator could have been expanded into an - * inline function). If we don't recognize the form - * of the WHEN clause, just punt and display it as-is. 
- */ - if (IsA(w, OpExpr)) - { - List *args = ((OpExpr *) w)->args; - - if (list_length(args) == 2 && - IsA(strip_implicit_coercions(linitial(args)), - CaseTestExpr)) - w = (Node *) lsecond(args); - } - } - - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "WHEN ", - 0, 0, 0); - get_rule_expr(w, context, false); - appendStringInfoString(buf, " THEN "); - get_rule_expr((Node *) when->result, context, true); - } - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "ELSE ", - 0, 0, 0); - get_rule_expr((Node *) caseexpr->defresult, context, true); - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "END", - -PRETTYINDENT_VAR, 0, 0); - } - break; - - case T_CaseTestExpr: - { - /* - * Normally we should never get here, since for expressions - * that can contain this node type we attempt to avoid - * recursing to it. But in an optimized expression we might - * be unable to avoid that (see comments for CaseExpr). If we - * do see one, print it as CASE_TEST_EXPR. - */ - appendStringInfoString(buf, "CASE_TEST_EXPR"); - } - break; - - case T_ArrayExpr: - { - ArrayExpr *arrayexpr = (ArrayExpr *) node; - - appendStringInfoString(buf, "ARRAY["); - get_rule_expr((Node *) arrayexpr->elements, context, true); - appendStringInfoChar(buf, ']'); - - /* - * If the array isn't empty, we assume its elements are - * coerced to the desired type. If it's empty, though, we - * need an explicit coercion to the array type. - */ - if (arrayexpr->elements == NIL) - appendStringInfo(buf, "::%s", - format_type_with_typemod(arrayexpr->array_typeid, -1)); - } - break; - - case T_RowExpr: - { - RowExpr *rowexpr = (RowExpr *) node; - TupleDesc tupdesc = NULL; - ListCell *arg; - int i; - char *sep; - - /* - * If it's a named type and not RECORD, we may have to skip - * dropped columns and/or claim there are NULLs for added - * columns. 
- */ - if (rowexpr->row_typeid != RECORDOID) - { - tupdesc = lookup_rowtype_tupdesc(rowexpr->row_typeid, -1); - Assert(list_length(rowexpr->args) <= tupdesc->natts); - } - - /* - * SQL99 allows "ROW" to be omitted when there is more than - * one column, but for simplicity we always print it. - */ - appendStringInfoString(buf, "ROW("); - sep = ""; - i = 0; - foreach(arg, rowexpr->args) - { - Node *e = (Node *) lfirst(arg); - - if (tupdesc == NULL || - !tupdesc->attrs[i]->attisdropped) - { - appendStringInfoString(buf, sep); - /* Whole-row Vars need special treatment here */ - get_rule_expr_toplevel(e, context, true); - sep = ", "; - } - i++; - } - if (tupdesc != NULL) - { - while (i < tupdesc->natts) - { - if (!tupdesc->attrs[i]->attisdropped) - { - appendStringInfoString(buf, sep); - appendStringInfoString(buf, "NULL"); - sep = ", "; - } - i++; - } - - ReleaseTupleDesc(tupdesc); - } - appendStringInfoChar(buf, ')'); - if (rowexpr->row_format == COERCE_EXPLICIT_CAST) - appendStringInfo(buf, "::%s", - format_type_with_typemod(rowexpr->row_typeid, -1)); - } - break; - - case T_RowCompareExpr: - { - RowCompareExpr *rcexpr = (RowCompareExpr *) node; - ListCell *arg; - char *sep; - - /* - * SQL99 allows "ROW" to be omitted when there is more than - * one column, but for simplicity we always print it. - */ - appendStringInfoString(buf, "(ROW("); - sep = ""; - foreach(arg, rcexpr->largs) - { - Node *e = (Node *) lfirst(arg); - - appendStringInfoString(buf, sep); - get_rule_expr(e, context, true); - sep = ", "; - } - - /* - * We assume that the name of the first-column operator will - * do for all the rest too. This is definitely open to - * failure, eg if some but not all operators were renamed - * since the construct was parsed, but there seems no way to - * be perfect. 
- */ - appendStringInfo(buf, ") %s ROW(", - generate_operator_name(linitial_oid(rcexpr->opnos), - exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs)))); - sep = ""; - foreach(arg, rcexpr->rargs) - { - Node *e = (Node *) lfirst(arg); - - appendStringInfoString(buf, sep); - get_rule_expr(e, context, true); - sep = ", "; - } - appendStringInfoString(buf, "))"); - } - break; - - case T_CoalesceExpr: - { - CoalesceExpr *coalesceexpr = (CoalesceExpr *) node; - - appendStringInfoString(buf, "COALESCE("); - get_rule_expr((Node *) coalesceexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_MinMaxExpr: - { - MinMaxExpr *minmaxexpr = (MinMaxExpr *) node; - - switch (minmaxexpr->op) - { - case IS_GREATEST: - appendStringInfoString(buf, "GREATEST("); - break; - case IS_LEAST: - appendStringInfoString(buf, "LEAST("); - break; - } - get_rule_expr((Node *) minmaxexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_XmlExpr: - { - XmlExpr *xexpr = (XmlExpr *) node; - bool needcomma = false; - ListCell *arg; - ListCell *narg; - Const *con; - - switch (xexpr->op) - { - case IS_XMLCONCAT: - appendStringInfoString(buf, "XMLCONCAT("); - break; - case IS_XMLELEMENT: - appendStringInfoString(buf, "XMLELEMENT("); - break; - case IS_XMLFOREST: - appendStringInfoString(buf, "XMLFOREST("); - break; - case IS_XMLPARSE: - appendStringInfoString(buf, "XMLPARSE("); - break; - case IS_XMLPI: - appendStringInfoString(buf, "XMLPI("); - break; - case IS_XMLROOT: - appendStringInfoString(buf, "XMLROOT("); - break; - case IS_XMLSERIALIZE: - appendStringInfoString(buf, "XMLSERIALIZE("); - break; - case IS_DOCUMENT: - break; - } - if (xexpr->op == IS_XMLPARSE || xexpr->op == IS_XMLSERIALIZE) - { - if (xexpr->xmloption == XMLOPTION_DOCUMENT) - appendStringInfoString(buf, "DOCUMENT "); - else - appendStringInfoString(buf, "CONTENT "); - } - if (xexpr->name) - { - appendStringInfo(buf, "NAME %s", - 
quote_identifier(map_xml_name_to_sql_identifier(xexpr->name))); - needcomma = true; - } - if (xexpr->named_args) - { - if (xexpr->op != IS_XMLFOREST) - { - if (needcomma) - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, "XMLATTRIBUTES("); - needcomma = false; - } - forboth(arg, xexpr->named_args, narg, xexpr->arg_names) - { - Node *e = (Node *) lfirst(arg); - char *argname = strVal(lfirst(narg)); - - if (needcomma) - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) e, context, true); - appendStringInfo(buf, " AS %s", - quote_identifier(map_xml_name_to_sql_identifier(argname))); - needcomma = true; - } - if (xexpr->op != IS_XMLFOREST) - appendStringInfoChar(buf, ')'); - } - if (xexpr->args) - { - if (needcomma) - appendStringInfoString(buf, ", "); - switch (xexpr->op) - { - case IS_XMLCONCAT: - case IS_XMLELEMENT: - case IS_XMLFOREST: - case IS_XMLPI: - case IS_XMLSERIALIZE: - /* no extra decoration needed */ - get_rule_expr((Node *) xexpr->args, context, true); - break; - case IS_XMLPARSE: - Assert(list_length(xexpr->args) == 2); - - get_rule_expr((Node *) linitial(xexpr->args), - context, true); - - con = (Const *) lsecond(xexpr->args); - Assert(IsA(con, Const)); - Assert(!con->constisnull); - if (DatumGetBool(con->constvalue)) - appendStringInfoString(buf, - " PRESERVE WHITESPACE"); - else - appendStringInfoString(buf, - " STRIP WHITESPACE"); - break; - case IS_XMLROOT: - Assert(list_length(xexpr->args) == 3); - - get_rule_expr((Node *) linitial(xexpr->args), - context, true); - - appendStringInfoString(buf, ", VERSION "); - con = (Const *) lsecond(xexpr->args); - if (IsA(con, Const) && - con->constisnull) - appendStringInfoString(buf, "NO VALUE"); - else - get_rule_expr((Node *) con, context, false); - - con = (Const *) lthird(xexpr->args); - Assert(IsA(con, Const)); - if (con->constisnull) - /* suppress STANDALONE NO VALUE */ ; - else - { - switch (DatumGetInt32(con->constvalue)) - { - case XML_STANDALONE_YES: - 
appendStringInfoString(buf, - ", STANDALONE YES"); - break; - case XML_STANDALONE_NO: - appendStringInfoString(buf, - ", STANDALONE NO"); - break; - case XML_STANDALONE_NO_VALUE: - appendStringInfoString(buf, - ", STANDALONE NO VALUE"); - break; - default: - break; - } - } - break; - case IS_DOCUMENT: - get_rule_expr_paren((Node *) xexpr->args, context, false, node); - break; - } - - } - if (xexpr->op == IS_XMLSERIALIZE) - appendStringInfo(buf, " AS %s", - format_type_with_typemod(xexpr->type, - xexpr->typmod)); - if (xexpr->op == IS_DOCUMENT) - appendStringInfoString(buf, " IS DOCUMENT"); - else - appendStringInfoChar(buf, ')'); - } - break; - - case T_NullTest: - { - NullTest *ntest = (NullTest *) node; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren((Node *) ntest->arg, context, true, node); - - /* - * For scalar inputs, we prefer to print as IS [NOT] NULL, - * which is shorter and traditional. If it's a rowtype input - * but we're applying a scalar test, must print IS [NOT] - * DISTINCT FROM NULL to be semantically correct. 
- */ - if (ntest->argisrow || - !type_is_rowtype(exprType((Node *) ntest->arg))) - { - switch (ntest->nulltesttype) - { - case IS_NULL: - appendStringInfoString(buf, " IS NULL"); - break; - case IS_NOT_NULL: - appendStringInfoString(buf, " IS NOT NULL"); - break; - default: - elog(ERROR, "unrecognized nulltesttype: %d", - (int) ntest->nulltesttype); - } - } - else - { - switch (ntest->nulltesttype) - { - case IS_NULL: - appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL"); - break; - case IS_NOT_NULL: - appendStringInfoString(buf, " IS DISTINCT FROM NULL"); - break; - default: - elog(ERROR, "unrecognized nulltesttype: %d", - (int) ntest->nulltesttype); - } - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_BooleanTest: - { - BooleanTest *btest = (BooleanTest *) node; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren((Node *) btest->arg, context, false, node); - switch (btest->booltesttype) - { - case IS_TRUE: - appendStringInfoString(buf, " IS TRUE"); - break; - case IS_NOT_TRUE: - appendStringInfoString(buf, " IS NOT TRUE"); - break; - case IS_FALSE: - appendStringInfoString(buf, " IS FALSE"); - break; - case IS_NOT_FALSE: - appendStringInfoString(buf, " IS NOT FALSE"); - break; - case IS_UNKNOWN: - appendStringInfoString(buf, " IS UNKNOWN"); - break; - case IS_NOT_UNKNOWN: - appendStringInfoString(buf, " IS NOT UNKNOWN"); - break; - default: - elog(ERROR, "unrecognized booltesttype: %d", - (int) btest->booltesttype); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_CoerceToDomain: - { - CoerceToDomain *ctest = (CoerceToDomain *) node; - Node *arg = (Node *) ctest->arg; - - if (ctest->coercionformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr(arg, context, false); - } - else - { - get_coercion_expr(arg, context, - ctest->resulttype, - ctest->resulttypmod, - node); - } - } - break; - 
- case T_CoerceToDomainValue: - appendStringInfoString(buf, "VALUE"); - break; - - case T_SetToDefault: - appendStringInfoString(buf, "DEFAULT"); - break; - - case T_CurrentOfExpr: - { - CurrentOfExpr *cexpr = (CurrentOfExpr *) node; - - if (cexpr->cursor_name) - appendStringInfo(buf, "CURRENT OF %s", - quote_identifier(cexpr->cursor_name)); - else - appendStringInfo(buf, "CURRENT OF $%d", - cexpr->cursor_param); - } - break; - - case T_InferenceElem: - { - InferenceElem *iexpr = (InferenceElem *) node; - bool save_varprefix; - bool need_parens; - - /* - * InferenceElem can only refer to target relation, so a - * prefix is not useful, and indeed would cause parse errors. - */ - save_varprefix = context->varprefix; - context->varprefix = false; - - /* - * Parenthesize the element unless it's a simple Var or a bare - * function call. Follows pg_get_indexdef_worker(). - */ - need_parens = !IsA(iexpr->expr, Var); - if (IsA(iexpr->expr, FuncExpr) && - ((FuncExpr *) iexpr->expr)->funcformat == - COERCE_EXPLICIT_CALL) - need_parens = false; - - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) iexpr->expr, - context, false); - if (need_parens) - appendStringInfoChar(buf, ')'); - - context->varprefix = save_varprefix; - - if (iexpr->infercollid) - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(iexpr->infercollid)); - - /* Add the operator class name, if not default */ - if (iexpr->inferopclass) - { - Oid inferopclass = iexpr->inferopclass; - Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass); - - get_opclass_name(inferopclass, inferopcinputtype, buf); - } - } - break; - - case T_List: - { - char *sep; - ListCell *l; - - sep = ""; - foreach(l, (List *) node) - { - appendStringInfoString(buf, sep); - get_rule_expr((Node *) lfirst(l), context, showimplicit); - sep = ", "; - } - } - break; - - default: - elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); - break; - } -} - -/* - * get_rule_expr_toplevel - 
Parse back a toplevel expression - * - * Same as get_rule_expr(), except that if the expr is just a Var, we pass - * istoplevel = true not false to get_variable(). This causes whole-row Vars - * to get printed with decoration that will prevent expansion of "*". - * We need to use this in contexts such as ROW() and VALUES(), where the - * parser would expand "foo.*" appearing at top level. (In principle we'd - * use this in get_target_list() too, but that has additional worries about - * whether to print AS, so it needs to invoke get_variable() directly anyway.) - */ -static void -get_rule_expr_toplevel(Node *node, deparse_context *context, - bool showimplicit) -{ - if (node && IsA(node, Var)) - (void) get_variable((Var *) node, 0, true, context); - else - get_rule_expr(node, context, showimplicit); -} - -/* - * get_rule_expr_funccall - Parse back a function-call expression - * - * Same as get_rule_expr(), except that we guarantee that the output will - * look like a function call, or like one of the things the grammar treats as - * equivalent to a function call (see the func_expr_windowless production). - * This is needed in places where the grammar uses func_expr_windowless and - * you can't substitute a parenthesized a_expr. If what we have isn't going - * to look like a function call, wrap it in a dummy CAST() expression, which - * will satisfy the grammar --- and, indeed, is likely what the user wrote to - * produce such a thing. 
- */ -static void -get_rule_expr_funccall(Node *node, deparse_context *context, - bool showimplicit) -{ - if (looks_like_function(node)) - get_rule_expr(node, context, showimplicit); - else - { - StringInfo buf = context->buf; - - appendStringInfoString(buf, "CAST("); - /* no point in showing any top-level implicit cast */ - get_rule_expr(node, context, false); - appendStringInfo(buf, " AS %s)", - format_type_with_typemod(exprType(node), - exprTypmod(node))); - } -} - -/* - * Helper function to identify node types that satisfy func_expr_windowless. - * If in doubt, "false" is always a safe answer. - */ -static bool -looks_like_function(Node *node) -{ - if (node == NULL) - return false; /* probably shouldn't happen */ - switch (nodeTag(node)) - { - case T_FuncExpr: - /* OK, unless it's going to deparse as a cast */ - return (((FuncExpr *) node)->funcformat == COERCE_EXPLICIT_CALL); - case T_NullIfExpr: - case T_CoalesceExpr: - case T_MinMaxExpr: - case T_XmlExpr: - /* these are all accepted by func_expr_common_subexpr */ - return true; - default: - break; - } - return false; -} - - -/* - * get_oper_expr - Parse back an OpExpr node - */ -static void -get_oper_expr(OpExpr *expr, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid opno = expr->opno; - List *args = expr->args; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - if (list_length(args) == 2) - { - /* binary operator */ - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - get_rule_expr_paren(arg1, context, true, (Node *) expr); - appendStringInfo(buf, " %s ", - generate_operator_name(opno, - exprType(arg1), - exprType(arg2))); - get_rule_expr_paren(arg2, context, true, (Node *) expr); - } - else - { - /* unary operator --- but which side? 
*/ - Node *arg = (Node *) linitial(args); - HeapTuple tp; - Form_pg_operator optup; - - tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for operator %u", opno); - optup = (Form_pg_operator) GETSTRUCT(tp); - switch (optup->oprkind) - { - case 'l': - appendStringInfo(buf, "%s ", - generate_operator_name(opno, - InvalidOid, - exprType(arg))); - get_rule_expr_paren(arg, context, true, (Node *) expr); - break; - case 'r': - get_rule_expr_paren(arg, context, true, (Node *) expr); - appendStringInfo(buf, " %s", - generate_operator_name(opno, - exprType(arg), - InvalidOid)); - break; - default: - elog(ERROR, "bogus oprkind: %d", optup->oprkind); - } - ReleaseSysCache(tp); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); -} - -/* - * get_func_expr - Parse back a FuncExpr node - */ -static void -get_func_expr(FuncExpr *expr, deparse_context *context, - bool showimplicit) -{ - StringInfo buf = context->buf; - Oid funcoid = expr->funcid; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - List *argnames; - bool use_variadic; - ListCell *l; - - /* - * If the function call came from an implicit coercion, then just show the - * first argument --- unless caller wants to see implicit coercions. - */ - if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit) - { - get_rule_expr_paren((Node *) linitial(expr->args), context, - false, (Node *) expr); - return; - } - - /* - * If the function call came from a cast, then show the first argument - * plus an explicit cast operation. 
- */ - if (expr->funcformat == COERCE_EXPLICIT_CAST || - expr->funcformat == COERCE_IMPLICIT_CAST) - { - Node *arg = linitial(expr->args); - Oid rettype = expr->funcresulttype; - int32 coercedTypmod; - - /* Get the typmod if this is a length-coercion function */ - (void) exprIsLengthCoercion((Node *) expr, &coercedTypmod); - - get_coercion_expr(arg, context, - rettype, coercedTypmod, - (Node *) expr); - - return; - } - - /* - * Normal function: display as proname(args). First we need to extract - * the argument datatypes. - */ - if (list_length(expr->args) > FUNC_MAX_ARGS) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg("too many arguments"))); - nargs = 0; - argnames = NIL; - foreach(l, expr->args) - { - Node *arg = (Node *) lfirst(l); - - if (IsA(arg, NamedArgExpr)) - argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); - argtypes[nargs] = exprType(arg); - nargs++; - } - - appendStringInfo(buf, "%s(", - generate_function_name(funcoid, nargs, - argnames, argtypes, - expr->funcvariadic, - &use_variadic, - context->special_exprkind)); - nargs = 0; - foreach(l, expr->args) - { - if (nargs++ > 0) - appendStringInfoString(buf, ", "); - if (use_variadic && lnext(l) == NULL) - appendStringInfoString(buf, "VARIADIC "); - get_rule_expr((Node *) lfirst(l), context, true); - } - appendStringInfoChar(buf, ')'); -} - -/* - * get_agg_expr - Parse back an Aggref node - */ -static void -get_agg_expr(Aggref *aggref, deparse_context *context, - Aggref *original_aggref) -{ - StringInfo buf = context->buf; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - bool use_variadic; - - /* - * For a combining aggregate, we look up and deparse the corresponding - * partial aggregate instead. This is necessary because our input - * argument list has been replaced; the new argument list always has just - * one element, which will point to a partial Aggref that supplies us with - * transition states to combine. 
- */ - if (DO_AGGSPLIT_COMBINE(aggref->aggsplit)) - { - TargetEntry *tle = linitial(aggref->args); - - Assert(list_length(aggref->args) == 1); - Assert(IsA(tle, TargetEntry)); - resolve_special_varno((Node *) tle->expr, context, original_aggref, - get_agg_combine_expr); - return; - } - - /* - * Mark as PARTIAL, if appropriate. We look to the original aggref so as - * to avoid printing this when recursing from the code just above. - */ - if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit)) - appendStringInfoString(buf, "PARTIAL "); - - /* Extract the argument types as seen by the parser */ - nargs = get_aggregate_argtypes(aggref, argtypes); - - /* Print the aggregate name, schema-qualified if needed */ - appendStringInfo(buf, "%s(%s", - generate_function_name(aggref->aggfnoid, nargs, - NIL, argtypes, - aggref->aggvariadic, - &use_variadic, - context->special_exprkind), - (aggref->aggdistinct != NIL) ? "DISTINCT " : ""); - - if (AGGKIND_IS_ORDERED_SET(aggref->aggkind)) - { - /* - * Ordered-set aggregates do not use "*" syntax. Also, we needn't - * worry about inserting VARIADIC. So we can just dump the direct - * args as-is. 
- */ - Assert(!aggref->aggvariadic); - get_rule_expr((Node *) aggref->aggdirectargs, context, true); - Assert(aggref->aggorder != NIL); - appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY "); - get_rule_orderby(aggref->aggorder, aggref->args, false, context); - } - else - { - /* aggstar can be set only in zero-argument aggregates */ - if (aggref->aggstar) - appendStringInfoChar(buf, '*'); - else - { - ListCell *l; - int i; - - i = 0; - foreach(l, aggref->args) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - Node *arg = (Node *) tle->expr; - - Assert(!IsA(arg, NamedArgExpr)); - if (tle->resjunk) - continue; - if (i++ > 0) - appendStringInfoString(buf, ", "); - if (use_variadic && i == nargs) - appendStringInfoString(buf, "VARIADIC "); - get_rule_expr(arg, context, true); - } - } - - if (aggref->aggorder != NIL) - { - appendStringInfoString(buf, " ORDER BY "); - get_rule_orderby(aggref->aggorder, aggref->args, false, context); - } - } - - if (aggref->aggfilter != NULL) - { - appendStringInfoString(buf, ") FILTER (WHERE "); - get_rule_expr((Node *) aggref->aggfilter, context, false); - } - - appendStringInfoChar(buf, ')'); -} - -/* - * This is a helper function for get_agg_expr(). It's used when we deparse - * a combining Aggref; resolve_special_varno locates the corresponding partial - * Aggref and then calls this. 
- */ -static void -get_agg_combine_expr(Node *node, deparse_context *context, void *private) -{ - Aggref *aggref; - Aggref *original_aggref = private; - - if (!IsA(node, Aggref)) - elog(ERROR, "combining Aggref does not point to an Aggref"); - - aggref = (Aggref *) node; - get_agg_expr(aggref, context, original_aggref); -} - -/* - * get_windowfunc_expr - Parse back a WindowFunc node - */ -static void -get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - List *argnames; - ListCell *l; - - if (list_length(wfunc->args) > FUNC_MAX_ARGS) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg("too many arguments"))); - nargs = 0; - argnames = NIL; - foreach(l, wfunc->args) - { - Node *arg = (Node *) lfirst(l); - - if (IsA(arg, NamedArgExpr)) - argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); - argtypes[nargs] = exprType(arg); - nargs++; - } - - appendStringInfo(buf, "%s(", - generate_function_name(wfunc->winfnoid, nargs, - argnames, argtypes, - false, NULL, - context->special_exprkind)); - /* winstar can be set only in zero-argument aggregates */ - if (wfunc->winstar) - appendStringInfoChar(buf, '*'); - else - get_rule_expr((Node *) wfunc->args, context, true); - - if (wfunc->aggfilter != NULL) - { - appendStringInfoString(buf, ") FILTER (WHERE "); - get_rule_expr((Node *) wfunc->aggfilter, context, false); - } - - appendStringInfoString(buf, ") OVER "); - - foreach(l, context->windowClause) - { - WindowClause *wc = (WindowClause *) lfirst(l); - - if (wc->winref == wfunc->winref) - { - if (wc->name) - appendStringInfoString(buf, quote_identifier(wc->name)); - else - get_rule_windowspec(wc, context->windowTList, context); - break; - } - } - if (l == NULL) - { - if (context->windowClause) - elog(ERROR, "could not find window clause for winref %u", - wfunc->winref); - - /* - * In EXPLAIN, we don't have window context information available, so - * we have to 
settle for this: - */ - appendStringInfoString(buf, "(?)"); - } -} - -/* ---------- - * get_coercion_expr - * - * Make a string representation of a value coerced to a specific type - * ---------- - */ -static void -get_coercion_expr(Node *arg, deparse_context *context, - Oid resulttype, int32 resulttypmod, - Node *parentNode) -{ - StringInfo buf = context->buf; - - /* - * Since parse_coerce.c doesn't immediately collapse application of - * length-coercion functions to constants, what we'll typically see in - * such cases is a Const with typmod -1 and a length-coercion function - * right above it. Avoid generating redundant output. However, beware of - * suppressing casts when the user actually wrote something like - * 'foo'::text::char(3). - * - * Note: it might seem that we are missing the possibility of needing to - * print a COLLATE clause for such a Const. However, a Const could only - * have nondefault collation in a post-constant-folding tree, in which the - * length coercion would have been folded too. See also the special - * handling of CollateExpr in coerce_to_target_type(): any collation - * marking will be above the coercion node, not below it. - */ - if (arg && IsA(arg, Const) && - ((Const *) arg)->consttype == resulttype && - ((Const *) arg)->consttypmod == -1) - { - /* Show the constant without normal ::typename decoration */ - get_const_expr((Const *) arg, context, -1); - } - else - { - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg, context, false, parentNode); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - appendStringInfo(buf, "::%s", - format_type_with_typemod(resulttype, resulttypmod)); -} - -/* ---------- - * get_const_expr - * - * Make a string representation of a Const - * - * showtype can be -1 to never show "::typename" decoration, or +1 to always - * show it, or 0 to show it only if the constant wouldn't be assumed to be - * the right type by default. 
- * - * If the Const's collation isn't default for its type, show that too. - * We mustn't do this when showtype is -1 (since that means the caller will - * print "::typename", and we can't put a COLLATE clause in between). It's - * caller's responsibility that collation isn't missed in such cases. - * ---------- - */ -static void -get_const_expr(Const *constval, deparse_context *context, int showtype) -{ - StringInfo buf = context->buf; - Oid typoutput; - bool typIsVarlena; - char *extval; - bool needlabel = false; - - if (constval->constisnull) - { - /* - * Always label the type of a NULL constant to prevent misdecisions - * about type when reparsing. - */ - appendStringInfoString(buf, "NULL"); - if (showtype >= 0) - { - appendStringInfo(buf, "::%s", - format_type_with_typemod(constval->consttype, - constval->consttypmod)); - get_const_collation(constval, context); - } - return; - } - - getTypeOutputInfo(constval->consttype, - &typoutput, &typIsVarlena); - - extval = OidOutputFunctionCall(typoutput, constval->constvalue); - - switch (constval->consttype) - { - case INT4OID: - - /* - * INT4 can be printed without any decoration, unless it is - * negative; in that case print it as '-nnn'::integer to ensure - * that the output will re-parse as a constant, not as a constant - * plus operator. In most cases we could get away with printing - * (-nnn) instead, because of the way that gram.y handles negative - * literals; but that doesn't work for INT_MIN, and it doesn't - * seem that much prettier anyway. - */ - if (extval[0] != '-') - appendStringInfoString(buf, extval); - else - { - appendStringInfo(buf, "'%s'", extval); - needlabel = true; /* we must attach a cast */ - } - break; - - case NUMERICOID: - - /* - * NUMERIC can be printed without quotes if it looks like a float - * constant (not an integer, and not Infinity or NaN) and doesn't - * have a leading sign (for the same reason as for INT4). 
- */ - if (isdigit((unsigned char) extval[0]) && - strcspn(extval, "eE.") != strlen(extval)) - { - appendStringInfoString(buf, extval); - } - else - { - appendStringInfo(buf, "'%s'", extval); - needlabel = true; /* we must attach a cast */ - } - break; - - case BITOID: - case VARBITOID: - appendStringInfo(buf, "B'%s'", extval); - break; - - case BOOLOID: - if (strcmp(extval, "t") == 0) - appendStringInfoString(buf, "true"); - else - appendStringInfoString(buf, "false"); - break; - - default: - simple_quote_literal(buf, extval); - break; - } - - pfree(extval); - - if (showtype < 0) - return; - - /* - * For showtype == 0, append ::typename unless the constant will be - * implicitly typed as the right type when it is read in. - * - * XXX this code has to be kept in sync with the behavior of the parser, - * especially make_const. - */ - switch (constval->consttype) - { - case BOOLOID: - case UNKNOWNOID: - /* These types can be left unlabeled */ - needlabel = false; - break; - case INT4OID: - /* We determined above whether a label is needed */ - break; - case NUMERICOID: - - /* - * Float-looking constants will be typed as numeric, which we - * checked above; but if there's a nondefault typmod we need to - * show it. 
- */ - needlabel |= (constval->consttypmod >= 0); - break; - default: - needlabel = true; - break; - } - if (needlabel || showtype > 0) - appendStringInfo(buf, "::%s", - format_type_with_typemod(constval->consttype, - constval->consttypmod)); - - get_const_collation(constval, context); -} - -/* - * helper for get_const_expr: append COLLATE if needed - */ -static void -get_const_collation(Const *constval, deparse_context *context) -{ - StringInfo buf = context->buf; - - if (OidIsValid(constval->constcollid)) - { - Oid typcollation = get_typcollation(constval->consttype); - - if (constval->constcollid != typcollation) - { - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(constval->constcollid)); - } - } -} - -/* - * simple_quote_literal - Format a string as a SQL literal, append to buf - */ -static void -simple_quote_literal(StringInfo buf, const char *val) -{ - const char *valptr; - - /* - * We form the string literal according to the prevailing setting of - * standard_conforming_strings; we never use E''. User is responsible for - * making sure result is used correctly. - */ - appendStringInfoChar(buf, '\''); - for (valptr = val; *valptr; valptr++) - { - char ch = *valptr; - - if (SQL_STR_DOUBLE(ch, !standard_conforming_strings)) - appendStringInfoChar(buf, ch); - appendStringInfoChar(buf, ch); - } - appendStringInfoChar(buf, '\''); -} - - -/* ---------- - * get_sublink_expr - Parse back a sublink - * ---------- - */ -static void -get_sublink_expr(SubLink *sublink, deparse_context *context) -{ - StringInfo buf = context->buf; - Query *query = (Query *) (sublink->subselect); - char *opname = NULL; - bool need_paren; - - if (sublink->subLinkType == ARRAY_SUBLINK) - appendStringInfoString(buf, "ARRAY("); - else - appendStringInfoChar(buf, '('); - - /* - * Note that we print the name of only the first operator, when there are - * multiple combining operators. 
This is an approximation that could go - * wrong in various scenarios (operators in different schemas, renamed - * operators, etc) but there is not a whole lot we can do about it, since - * the syntax allows only one operator to be shown. - */ - if (sublink->testexpr) - { - if (IsA(sublink->testexpr, OpExpr)) - { - /* single combining operator */ - OpExpr *opexpr = (OpExpr *) sublink->testexpr; - - get_rule_expr(linitial(opexpr->args), context, true); - opname = generate_operator_name(opexpr->opno, - exprType(linitial(opexpr->args)), - exprType(lsecond(opexpr->args))); - } - else if (IsA(sublink->testexpr, BoolExpr)) - { - /* multiple combining operators, = or <> cases */ - char *sep; - ListCell *l; - - appendStringInfoChar(buf, '('); - sep = ""; - foreach(l, ((BoolExpr *) sublink->testexpr)->args) - { - OpExpr *opexpr = (OpExpr *) lfirst(l); - - Assert(IsA(opexpr, OpExpr)); - appendStringInfoString(buf, sep); - get_rule_expr(linitial(opexpr->args), context, true); - if (!opname) - opname = generate_operator_name(opexpr->opno, - exprType(linitial(opexpr->args)), - exprType(lsecond(opexpr->args))); - sep = ", "; - } - appendStringInfoChar(buf, ')'); - } - else if (IsA(sublink->testexpr, RowCompareExpr)) - { - /* multiple combining operators, < <= > >= cases */ - RowCompareExpr *rcexpr = (RowCompareExpr *) sublink->testexpr; - - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) rcexpr->largs, context, true); - opname = generate_operator_name(linitial_oid(rcexpr->opnos), - exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs))); - appendStringInfoChar(buf, ')'); - } - else - elog(ERROR, "unrecognized testexpr type: %d", - (int) nodeTag(sublink->testexpr)); - } - - need_paren = true; - - switch (sublink->subLinkType) - { - case EXISTS_SUBLINK: - appendStringInfoString(buf, "EXISTS "); - break; - - case ANY_SUBLINK: - if (strcmp(opname, "=") == 0) /* Represent = ANY as IN */ - appendStringInfoString(buf, " IN "); - else - appendStringInfo(buf, " 
%s ANY ", opname); - break; - - case ALL_SUBLINK: - appendStringInfo(buf, " %s ALL ", opname); - break; - - case ROWCOMPARE_SUBLINK: - appendStringInfo(buf, " %s ", opname); - break; - - case EXPR_SUBLINK: - case MULTIEXPR_SUBLINK: - case ARRAY_SUBLINK: - need_paren = false; - break; - - case CTE_SUBLINK: /* shouldn't occur in a SubLink */ - default: - elog(ERROR, "unrecognized sublink type: %d", - (int) sublink->subLinkType); - break; - } - - if (need_paren) - appendStringInfoChar(buf, '('); - - get_query_def(query, buf, context->namespaces, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - - if (need_paren) - appendStringInfoString(buf, "))"); - else - appendStringInfoChar(buf, ')'); -} - - -/* ---------- - * get_from_clause - Parse back a FROM clause - * - * "prefix" is the keyword that denotes the start of the list of FROM - * elements. It is FROM when used to parse back SELECT and UPDATE, but - * is USING when parsing back DELETE. - * ---------- - */ -static void -get_from_clause(Query *query, const char *prefix, deparse_context *context) -{ - StringInfo buf = context->buf; - bool first = true; - ListCell *l; - - /* - * We use the query's jointree as a guide to what to print. However, we - * must ignore auto-added RTEs that are marked not inFromCl. (These can - * only appear at the top level of the jointree, so it's sufficient to - * check here.) This check also ensures we ignore the rule pseudo-RTEs - * for NEW and OLD. 
- */ - foreach(l, query->jointree->fromlist) - { - Node *jtnode = (Node *) lfirst(l); - - if (IsA(jtnode, RangeTblRef)) - { - int varno = ((RangeTblRef *) jtnode)->rtindex; - RangeTblEntry *rte = rt_fetch(varno, query->rtable); - - if (!rte->inFromCl) - continue; - } - - if (first) - { - appendContextKeyword(context, prefix, - -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); - first = false; - - get_from_clause_item(jtnode, query, context); - } - else - { - StringInfoData itembuf; - - appendStringInfoString(buf, ", "); - - /* - * Put the new FROM item's text into itembuf so we can decide - * after we've got it whether or not it needs to go on a new line. - */ - initStringInfo(&itembuf); - context->buf = &itembuf; - - get_from_clause_item(jtnode, query, context); - - /* Restore context's output buffer */ - context->buf = buf; - - /* Consider line-wrapping if enabled */ - if (PRETTY_INDENT(context) && context->wrapColumn >= 0) - { - /* Does the new item start with a new line? */ - if (itembuf.len > 0 && itembuf.data[0] == '\n') - { - /* If so, we shouldn't add anything */ - /* instead, remove any trailing spaces currently in buf */ - removeStringInfoSpaces(buf); - } - else - { - char *trailing_nl; - - /* Locate the start of the current line in the buffer */ - trailing_nl = strrchr(buf->data, '\n'); - if (trailing_nl == NULL) - trailing_nl = buf->data; - else - trailing_nl++; - - /* - * Add a newline, plus some indentation, if the new item - * would cause an overflow. 
- */ - if (strlen(trailing_nl) + itembuf.len > context->wrapColumn) - appendContextKeyword(context, "", -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_VAR); - } - } - - /* Add the new item */ - appendStringInfoString(buf, itembuf.data); - - /* clean up */ - pfree(itembuf.data); - } - } -} - -static void -get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); - - if (IsA(jtnode, RangeTblRef)) - { - int varno = ((RangeTblRef *) jtnode)->rtindex; - RangeTblEntry *rte = rt_fetch(varno, query->rtable); - char *refname = get_rtable_name(varno, context); - deparse_columns *colinfo = deparse_columns_fetch(varno, dpns); - RangeTblFunction *rtfunc1 = NULL; - bool printalias; - CitusRTEKind rteKind = GetRangeTblKind(rte); - - if (rte->lateral) - appendStringInfoString(buf, "LATERAL "); - - /* Print the FROM item proper */ - switch (rte->rtekind) - { - case RTE_RELATION: - /* Normal relation RTE */ - appendStringInfo(buf, "%s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, - context->namespaces)); - break; - case RTE_SUBQUERY: - /* Subquery RTE */ - appendStringInfoChar(buf, '('); - get_query_def(rte->subquery, buf, context->namespaces, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - appendStringInfoChar(buf, ')'); - break; - case RTE_FUNCTION: - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfoString(buf, - generate_fragment_name(fragmentSchemaName, - fragmentTableName)); - break; - } - - /* Function RTE */ - rtfunc1 = (RangeTblFunction *) linitial(rte->functions); - - /* - 
* Omit ROWS FROM() syntax for just one function, unless it - * has both a coldeflist and WITH ORDINALITY. If it has both, - * we must use ROWS FROM() syntax to avoid ambiguity about - * whether the coldeflist includes the ordinality column. - */ - if (list_length(rte->functions) == 1 && - (rtfunc1->funccolnames == NIL || !rte->funcordinality)) - { - get_rule_expr_funccall(rtfunc1->funcexpr, context, true); - /* we'll print the coldeflist below, if it has one */ - } - else - { - bool all_unnest; - ListCell *lc; - - /* - * If all the function calls in the list are to unnest, - * and none need a coldeflist, then collapse the list back - * down to UNNEST(args). (If we had more than one - * built-in unnest function, this would get more - * difficult.) - * - * XXX This is pretty ugly, since it makes not-terribly- - * future-proof assumptions about what the parser would do - * with the output; but the alternative is to emit our - * nonstandard ROWS FROM() notation for what might have - * been a perfectly spec-compliant multi-argument - * UNNEST(). 
- */ - all_unnest = true; - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - - if (!IsA(rtfunc->funcexpr, FuncExpr) || - ((FuncExpr *) rtfunc->funcexpr)->funcid != F_ARRAY_UNNEST || - rtfunc->funccolnames != NIL) - { - all_unnest = false; - break; - } - } - - if (all_unnest) - { - List *allargs = NIL; - - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - List *args = ((FuncExpr *) rtfunc->funcexpr)->args; - - allargs = list_concat(allargs, list_copy(args)); - } - - appendStringInfoString(buf, "UNNEST("); - get_rule_expr((Node *) allargs, context, true); - appendStringInfoChar(buf, ')'); - } - else - { - int funcno = 0; - - appendStringInfoString(buf, "ROWS FROM("); - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - - if (funcno > 0) - appendStringInfoString(buf, ", "); - get_rule_expr_funccall(rtfunc->funcexpr, context, true); - if (rtfunc->funccolnames != NIL) - { - /* Reconstruct the column definition list */ - appendStringInfoString(buf, " AS "); - get_from_clause_coldeflist(rtfunc, - NULL, - context); - } - funcno++; - } - appendStringInfoChar(buf, ')'); - } - /* prevent printing duplicate coldeflist below */ - rtfunc1 = NULL; - } - if (rte->funcordinality) - appendStringInfoString(buf, " WITH ORDINALITY"); - break; - case RTE_VALUES: - /* Values list RTE */ - appendStringInfoChar(buf, '('); - get_values_def(rte->values_lists, context); - appendStringInfoChar(buf, ')'); - break; - case RTE_CTE: - appendStringInfoString(buf, quote_identifier(rte->ctename)); - break; - default: - elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind); - break; - } - - /* Print the relation alias, if needed */ - printalias = false; - if (rte->alias != NULL) - { - /* Always print alias if user provided one */ - printalias = true; - } - else if (colinfo->printaliases) - { - /* Always print alias if we need to print column aliases */ - 
printalias = true; - } - else if (rte->rtekind == RTE_RELATION) - { - /* - * No need to print alias if it's same as relation name (this - * would normally be the case, but not if set_rtable_names had to - * resolve a conflict). - */ - if (strcmp(refname, get_relation_name(rte->relid)) != 0) - printalias = true; - } - else if (rte->rtekind == RTE_FUNCTION) - { - /* - * For a function RTE, always print alias. This covers possible - * renaming of the function and/or instability of the - * FigureColname rules for things that aren't simple functions. - * Note we'd need to force it anyway for the columndef list case. - */ - printalias = true; - } - else if (rte->rtekind == RTE_VALUES) - { - /* Alias is syntactically required for VALUES */ - printalias = true; - } - else if (rte->rtekind == RTE_CTE) - { - /* - * No need to print alias if it's same as CTE name (this would - * normally be the case, but not if set_rtable_names had to - * resolve a conflict). - */ - if (strcmp(refname, rte->ctename) != 0) - printalias = true; - } - else if (rte->rtekind == RTE_SUBQUERY) - { - /* subquery requires alias too */ - printalias = true; - } - if (printalias) - appendStringInfo(buf, " %s", quote_identifier(refname)); - - /* Print the column definitions or aliases, if needed */ - if (rtfunc1 && rtfunc1->funccolnames != NIL) - { - /* Reconstruct the columndef list, which is also the aliases */ - get_from_clause_coldeflist(rtfunc1, colinfo, context); - } - else if (GetRangeTblKind(rte) != CITUS_RTE_SHARD) - { - /* Else print column aliases as needed */ - get_column_alias_list(colinfo, context); - } - - /* Tablesample clause must go after any alias */ - if ((rteKind == CITUS_RTE_RELATION || rteKind == CITUS_RTE_SHARD) && - rte->tablesample) - { - get_tablesample_def(rte->tablesample, context); - } - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); - bool need_paren_on_right; - - 
need_paren_on_right = PRETTY_PAREN(context) && - !IsA(j->rarg, RangeTblRef) && - !(IsA(j->rarg, JoinExpr) &&((JoinExpr *) j->rarg)->alias != NULL); - - if (!PRETTY_PAREN(context) || j->alias != NULL) - appendStringInfoChar(buf, '('); - - get_from_clause_item(j->larg, query, context); - - switch (j->jointype) - { - case JOIN_INNER: - if (j->quals) - appendContextKeyword(context, " JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - else - appendContextKeyword(context, " CROSS JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - case JOIN_LEFT: - appendContextKeyword(context, " LEFT JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - case JOIN_FULL: - appendContextKeyword(context, " FULL JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - case JOIN_RIGHT: - appendContextKeyword(context, " RIGHT JOIN ", - -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_JOIN); - break; - default: - elog(ERROR, "unrecognized join type: %d", - (int) j->jointype); - } - - if (need_paren_on_right) - appendStringInfoChar(buf, '('); - get_from_clause_item(j->rarg, query, context); - if (need_paren_on_right) - appendStringInfoChar(buf, ')'); - - if (j->usingClause) - { - ListCell *lc; - bool first = true; - - appendStringInfoString(buf, " USING ("); - /* Use the assigned names, not what's in usingClause */ - foreach(lc, colinfo->usingNames) - { - char *colname = (char *) lfirst(lc); - - if (first) - first = false; - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, quote_identifier(colname)); - } - appendStringInfoChar(buf, ')'); - } - else if (j->quals) - { - appendStringInfoString(buf, " ON "); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr(j->quals, context, false); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - else if (j->jointype != JOIN_INNER) - { - /* If we didn't say CROSS JOIN above, we must 
provide an ON */ - appendStringInfoString(buf, " ON TRUE"); - } - - if (!PRETTY_PAREN(context) || j->alias != NULL) - appendStringInfoChar(buf, ')'); - - /* Yes, it's correct to put alias after the right paren ... */ - if (j->alias != NULL) - { - appendStringInfo(buf, " %s", - quote_identifier(j->alias->aliasname)); - get_column_alias_list(colinfo, context); - } - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); -} - -/* - * get_column_alias_list - print column alias list for an RTE - * - * Caller must already have printed the relation's alias name. - */ -static void -get_column_alias_list(deparse_columns *colinfo, deparse_context *context) -{ - StringInfo buf = context->buf; - int i; - bool first = true; - - /* Don't print aliases if not needed */ - if (!colinfo->printaliases) - return; - - for (i = 0; i < colinfo->num_new_cols; i++) - { - char *colname = colinfo->new_colnames[i]; - - if (first) - { - appendStringInfoChar(buf, '('); - first = false; - } - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, quote_identifier(colname)); - } - if (!first) - appendStringInfoChar(buf, ')'); -} - -/* - * get_from_clause_coldeflist - reproduce FROM clause coldeflist - * - * When printing a top-level coldeflist (which is syntactically also the - * relation's column alias list), use column names from colinfo. But when - * printing a coldeflist embedded inside ROWS FROM(), we prefer to use the - * original coldeflist's names, which are available in rtfunc->funccolnames. - * Pass NULL for colinfo to select the latter behavior. - * - * The coldeflist is appended immediately (no space) to buf. Caller is - * responsible for ensuring that an alias or AS is present before it. 
- */ -static void -get_from_clause_coldeflist(RangeTblFunction *rtfunc, - deparse_columns *colinfo, - deparse_context *context) -{ - StringInfo buf = context->buf; - ListCell *l1; - ListCell *l2; - ListCell *l3; - ListCell *l4; - int i; - - appendStringInfoChar(buf, '('); - - /* there's no forfour(), so must chase one list the hard way */ - i = 0; - l4 = list_head(rtfunc->funccolnames); - forthree(l1, rtfunc->funccoltypes, - l2, rtfunc->funccoltypmods, - l3, rtfunc->funccolcollations) - { - Oid atttypid = lfirst_oid(l1); - int32 atttypmod = lfirst_int(l2); - Oid attcollation = lfirst_oid(l3); - char *attname; - - if (colinfo) - attname = colinfo->colnames[i]; - else - attname = strVal(lfirst(l4)); - - Assert(attname); /* shouldn't be any dropped columns here */ - - if (i > 0) - appendStringInfoString(buf, ", "); - appendStringInfo(buf, "%s %s", - quote_identifier(attname), - format_type_with_typemod(atttypid, atttypmod)); - if (OidIsValid(attcollation) && - attcollation != get_typcollation(atttypid)) - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(attcollation)); - - l4 = lnext(l4); - i++; - } - - appendStringInfoChar(buf, ')'); -} - -/* - * get_tablesample_def - print a TableSampleClause - */ -static void -get_tablesample_def(TableSampleClause *tablesample, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid argtypes[1]; - int nargs; - ListCell *l; - - /* - * We should qualify the handler's function name if it wouldn't be - * resolved by lookup in the current search path. 
- */ - argtypes[0] = INTERNALOID; - appendStringInfo(buf, " TABLESAMPLE %s (", - generate_function_name(tablesample->tsmhandler, 1, - NIL, argtypes, - false, NULL, EXPR_KIND_NONE)); - - nargs = 0; - foreach(l, tablesample->args) - { - if (nargs++ > 0) - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) lfirst(l), context, false); - } - appendStringInfoChar(buf, ')'); - - if (tablesample->repeatable != NULL) - { - appendStringInfoString(buf, " REPEATABLE ("); - get_rule_expr((Node *) tablesample->repeatable, context, false); - appendStringInfoChar(buf, ')'); - } -} - -/* - * get_opclass_name - fetch name of an index operator class - * - * The opclass name is appended (after a space) to buf. - * - * Output is suppressed if the opclass is the default for the given - * actual_datatype. (If you don't want this behavior, just pass - * InvalidOid for actual_datatype.) - */ -static void -get_opclass_name(Oid opclass, Oid actual_datatype, - StringInfo buf) -{ - HeapTuple ht_opc; - Form_pg_opclass opcrec; - char *opcname; - char *nspname; - - ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); - if (!HeapTupleIsValid(ht_opc)) - elog(ERROR, "cache lookup failed for opclass %u", opclass); - opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc); - - if (!OidIsValid(actual_datatype) || - GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) - { - /* Okay, we need the opclass name. Do we need to qualify it? 
*/ - opcname = NameStr(opcrec->opcname); - if (OpclassIsVisible(opclass)) - appendStringInfo(buf, " %s", quote_identifier(opcname)); - else - { - nspname = get_namespace_name(opcrec->opcnamespace); - appendStringInfo(buf, " %s.%s", - quote_identifier(nspname), - quote_identifier(opcname)); - } - } - ReleaseSysCache(ht_opc); -} - -/* - * processIndirection - take care of array and subfield assignment - * - * We strip any top-level FieldStore or assignment ArrayRef nodes that - * appear in the input, printing them as decoration for the base column - * name (which we assume the caller just printed). We might also need to - * strip CoerceToDomain nodes, but only ones that appear above assignment - * nodes. - * - * Returns the subexpression that's to be assigned. - */ -static Node * -processIndirection(Node *node, deparse_context *context) -{ - StringInfo buf = context->buf; - CoerceToDomain *cdomain = NULL; - - for (;;) - { - if (node == NULL) - break; - if (IsA(node, FieldStore)) - { - FieldStore *fstore = (FieldStore *) node; - Oid typrelid; - char *fieldname; - - /* lookup tuple type */ - typrelid = get_typ_typrelid(fstore->resulttype); - if (!OidIsValid(typrelid)) - elog(ERROR, "argument type %s of FieldStore is not a tuple type", - format_type_be(fstore->resulttype)); - - /* - * Print the field name. There should only be one target field in - * stored rules. There could be more than that in executable - * target lists, but this function cannot be used for that case. - */ - Assert(list_length(fstore->fieldnums) == 1); - fieldname = get_relid_attribute_name(typrelid, - linitial_int(fstore->fieldnums)); - appendStringInfo(buf, ".%s", quote_identifier(fieldname)); - - /* - * We ignore arg since it should be an uninteresting reference to - * the target column or subcolumn. 
- */ - node = (Node *) linitial(fstore->newvals); - } - else if (IsA(node, ArrayRef)) - { - ArrayRef *aref = (ArrayRef *) node; - - if (aref->refassgnexpr == NULL) - break; - printSubscripts(aref, context); - - /* - * We ignore refexpr since it should be an uninteresting reference - * to the target column or subcolumn. - */ - node = (Node *) aref->refassgnexpr; - } - else if (IsA(node, CoerceToDomain)) - { - cdomain = (CoerceToDomain *) node; - /* If it's an explicit domain coercion, we're done */ - if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) - break; - /* Tentatively descend past the CoerceToDomain */ - node = (Node *) cdomain->arg; - } - else - break; - } - - /* - * If we descended past a CoerceToDomain whose argument turned out not to - * be a FieldStore or array assignment, back up to the CoerceToDomain. - * (This is not enough to be fully correct if there are nested implicit - * CoerceToDomains, but such cases shouldn't ever occur.) - */ - if (cdomain && node == (Node *) cdomain->arg) - node = (Node *) cdomain; - - return node; -} - -static void -printSubscripts(ArrayRef *aref, deparse_context *context) -{ - StringInfo buf = context->buf; - ListCell *lowlist_item; - ListCell *uplist_item; - - lowlist_item = list_head(aref->reflowerindexpr); /* could be NULL */ - foreach(uplist_item, aref->refupperindexpr) - { - appendStringInfoChar(buf, '['); - if (lowlist_item) - { - /* If subexpression is NULL, get_rule_expr prints nothing */ - get_rule_expr((Node *) lfirst(lowlist_item), context, false); - appendStringInfoChar(buf, ':'); - lowlist_item = lnext(lowlist_item); - } - /* If subexpression is NULL, get_rule_expr prints nothing */ - get_rule_expr((Node *) lfirst(uplist_item), context, false); - appendStringInfoChar(buf, ']'); - } -} - -/* - * get_relation_name - * Get the unqualified name of a relation specified by OID - * - * This differs from the underlying get_rel_name() function in that it will - * throw error instead of silently returning NULL if the 
OID is bad. - */ -static char * -get_relation_name(Oid relid) -{ - char *relname = get_rel_name(relid); - - if (!relname) - elog(ERROR, "cache lookup failed for relation %u", relid); - return relname; -} - -/* - * generate_relation_or_shard_name - * Compute the name to display for a relation or shard - * - * If the provided relid is equal to the provided distrelid, this function - * returns a shard-extended relation name; otherwise, it falls through to a - * simple generate_relation_name call. - */ -static char * -generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid, - List *namespaces) -{ - char *relname = NULL; - - if (relid == distrelid) - { - relname = get_relation_name(relid); - - if (shardid > 0) - { - Oid schemaOid = get_rel_namespace(relid); - char *schemaName = get_namespace_name(schemaOid); - - AppendShardIdToName(&relname, shardid); - - relname = quote_qualified_identifier(schemaName, relname); - } - } - else - { - relname = generate_relation_name(relid, namespaces); - } - - return relname; -} - -/* - * generate_relation_name - * Compute the name to display for a relation specified by OID - * - * The result includes all necessary quoting and schema-prefixing. - * - * If namespaces isn't NIL, it must be a list of deparse_namespace nodes. - * We will forcibly qualify the relation name if it equals any CTE name - * visible in the namespace list. 
- */ -char * -generate_relation_name(Oid relid, List *namespaces) -{ - HeapTuple tp; - Form_pg_class reltup; - bool need_qual; - ListCell *nslist; - char *relname; - char *nspname; - char *result; - - tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for relation %u", relid); - reltup = (Form_pg_class) GETSTRUCT(tp); - relname = NameStr(reltup->relname); - - /* Check for conflicting CTE name */ - need_qual = false; - foreach(nslist, namespaces) - { - deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist); - ListCell *ctlist; - - foreach(ctlist, dpns->ctes) - { - CommonTableExpr *cte = (CommonTableExpr *) lfirst(ctlist); - - if (strcmp(cte->ctename, relname) == 0) - { - need_qual = true; - break; - } - } - if (need_qual) - break; - } - - /* Otherwise, qualify the name if not visible in search path */ - if (!need_qual) - need_qual = !RelationIsVisible(relid); - - if (need_qual) - nspname = get_namespace_name(reltup->relnamespace); - else - nspname = NULL; - - result = quote_qualified_identifier(nspname, relname); - - ReleaseSysCache(tp); - - return result; -} - -/* - * generate_fragment_name - * Compute the name to display for a shard or merged table - * - * The result includes all necessary quoting and schema-prefixing. The schema - * name can be NULL for regular shards. For merged tables, they are always - * declared within a job-specific schema, and therefore can't have null schema - * names. 
- */ -static char * -generate_fragment_name(char *schemaName, char *tableName) -{ - StringInfo fragmentNameString = makeStringInfo(); - - if (schemaName != NULL) - { - appendStringInfo(fragmentNameString, "%s.%s", quote_identifier(schemaName), - quote_identifier(tableName)); - } - else - { - appendStringInfoString(fragmentNameString, quote_identifier(tableName)); - } - - return fragmentNameString->data; -} - -/* - * generate_function_name - * Compute the name to display for a function specified by OID, - * given that it is being called with the specified actual arg names and - * types. (Those matter because of ambiguous-function resolution rules.) - * - * If we're dealing with a potentially variadic function (in practice, this - * means a FuncExpr or Aggref, not some other way of calling a function), then - * has_variadic must specify whether variadic arguments have been merged, - * and *use_variadic_p will be set to indicate whether to print VARIADIC in - * the output. For non-FuncExpr cases, has_variadic should be FALSE and - * use_variadic_p can be NULL. - * - * The result includes all necessary quoting and schema-prefixing. - */ -static char * -generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, - bool has_variadic, bool *use_variadic_p, - ParseExprKind special_exprkind) -{ - char *result; - HeapTuple proctup; - Form_pg_proc procform; - char *proname; - bool use_variadic; - char *nspname; - FuncDetailCode p_result; - Oid p_funcid; - Oid p_rettype; - bool p_retset; - int p_nvargs; - Oid p_vatype; - Oid *p_true_typeids; - bool force_qualify = false; - - proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); - if (!HeapTupleIsValid(proctup)) - elog(ERROR, "cache lookup failed for function %u", funcid); - procform = (Form_pg_proc) GETSTRUCT(proctup); - proname = NameStr(procform->proname); - - /* - * Due to parser hacks to avoid needing to reserve CUBE, we need to force - * qualification in some special cases. 
- */ - if (special_exprkind == EXPR_KIND_GROUP_BY) - { - if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0) - force_qualify = true; - } - - /* - * Determine whether VARIADIC should be printed. We must do this first - * since it affects the lookup rules in func_get_detail(). - * - * Currently, we always print VARIADIC if the function has a merged - * variadic-array argument. Note that this is always the case for - * functions taking a VARIADIC argument type other than VARIADIC ANY. - * - * In principle, if VARIADIC wasn't originally specified and the array - * actual argument is deconstructable, we could print the array elements - * separately and not print VARIADIC, thus more nearly reproducing the - * original input. For the moment that seems like too much complication - * for the benefit, and anyway we do not know whether VARIADIC was - * originally specified if it's a non-ANY type. - */ - if (use_variadic_p) - { - /* Parser should not have set funcvariadic unless fn is variadic */ - Assert(!has_variadic || OidIsValid(procform->provariadic)); - use_variadic = has_variadic; - *use_variadic_p = use_variadic; - } - else - { - Assert(!has_variadic); - use_variadic = false; - } - - /* - * The idea here is to schema-qualify only if the parser would fail to - * resolve the correct function given the unqualified func name with the - * specified argtypes and VARIADIC flag. But if we already decided to - * force qualification, then we can skip the lookup and pretend we didn't - * find it. 
- */ - if (!force_qualify) - p_result = func_get_detail(list_make1(makeString(proname)), - NIL, argnames, nargs, argtypes, - !use_variadic, true, - &p_funcid, &p_rettype, - &p_retset, &p_nvargs, &p_vatype, - &p_true_typeids, NULL); - else - { - p_result = FUNCDETAIL_NOTFOUND; - p_funcid = InvalidOid; - } - - if ((p_result == FUNCDETAIL_NORMAL || - p_result == FUNCDETAIL_AGGREGATE || - p_result == FUNCDETAIL_WINDOWFUNC) && - p_funcid == funcid) - nspname = NULL; - else - nspname = get_namespace_name(procform->pronamespace); - - result = quote_qualified_identifier(nspname, proname); - - ReleaseSysCache(proctup); - - return result; -} - -/* - * generate_operator_name - * Compute the name to display for an operator specified by OID, - * given that it is being called with the specified actual arg types. - * (Arg types matter because of ambiguous-operator resolution rules. - * Pass InvalidOid for unused arg of a unary operator.) - * - * The result includes all necessary quoting and schema-prefixing, - * plus the OPERATOR() decoration needed to use a qualified operator name - * in an expression. - */ -static char * -generate_operator_name(Oid operid, Oid arg1, Oid arg2) -{ - StringInfoData buf; - HeapTuple opertup; - Form_pg_operator operform; - char *oprname; - char *nspname; - - initStringInfo(&buf); - - opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operid)); - if (!HeapTupleIsValid(opertup)) - elog(ERROR, "cache lookup failed for operator %u", operid); - operform = (Form_pg_operator) GETSTRUCT(opertup); - oprname = NameStr(operform->oprname); - - /* - * Unlike generate_operator_name() in postgres/src/backend/utils/adt/ruleutils.c, - * we don't check if the operator is in current namespace or not. This is - * because this check is costly when the operator is not in current namespace. 
- */ - nspname = get_namespace_name(operform->oprnamespace); - Assert(nspname != NULL); - appendStringInfo(&buf, "OPERATOR(%s.", quote_identifier(nspname)); - appendStringInfoString(&buf, oprname); - appendStringInfoChar(&buf, ')'); - - ReleaseSysCache(opertup); - - return buf.data; -} - -#endif /* (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 90700) */ diff --git a/src/backend/distributed/utils/statistics_collection.c b/src/backend/distributed/utils/statistics_collection.c index 80ca7aa38..fcdb6100e 100644 --- a/src/backend/distributed/utils/statistics_collection.c +++ b/src/backend/distributed/utils/statistics_collection.c @@ -13,10 +13,7 @@ #include "citus_version.h" #include "fmgr.h" #include "utils/uuid.h" - -#if PG_VERSION_NUM >= 100000 #include "utils/backend_random.h" -#endif bool EnableStatisticsCollection = true; /* send basic usage statistics to Citus */ @@ -48,10 +45,7 @@ typedef struct utsname #include "utils/builtins.h" #include "utils/json.h" #include "utils/jsonb.h" - -#if PG_VERSION_NUM >= 100000 #include "utils/fmgrprotos.h" -#endif static size_t StatisticsCallback(char *contents, size_t size, size_t count, void *userData); @@ -605,15 +599,12 @@ citus_server_id(PG_FUNCTION_ARGS) { uint8 *buf = (uint8 *) palloc(UUID_LEN); -#if PG_VERSION_NUM >= 100000 - /* * If pg_backend_random() fails, fall-back to using random(). In previous * versions of postgres we don't have pg_backend_random(), so use it by * default in that case. 
 */ if (!pg_backend_random((char *) buf, UUID_LEN)) -#endif { int bufIdx = 0; for (bufIdx = 0; bufIdx < UUID_LEN; bufIdx++) diff --git a/src/backend/distributed/worker/task_tracker.c b/src/backend/distributed/worker/task_tracker.c index ae9da06cb..8bd110007 100644 --- a/src/backend/distributed/worker/task_tracker.c +++ b/src/backend/distributed/worker/task_tracker.c @@ -588,22 +588,10 @@ TaskTrackerShmemInit(void) if (!alreadyInitialized) { -#if (PG_VERSION_NUM >= 100000) WorkerTasksSharedState->taskHashTrancheId = LWLockNewTrancheId(); WorkerTasksSharedState->taskHashTrancheName = "Worker Task Hash Tranche"; LWLockRegisterTranche(WorkerTasksSharedState->taskHashTrancheId, WorkerTasksSharedState->taskHashTrancheName); -#else - - /* initialize lwlock protecting the task tracker hash table */ - LWLockTranche *tranche = &WorkerTasksSharedState->taskHashLockTranche; - - WorkerTasksSharedState->taskHashTrancheId = LWLockNewTrancheId(); - tranche->array_base = &WorkerTasksSharedState->taskHashLock; - tranche->array_stride = sizeof(LWLock); - tranche->name = "Worker Task Hash Tranche"; - LWLockRegisterTranche(WorkerTasksSharedState->taskHashTrancheId, tranche); -#endif LWLockInitialize(&WorkerTasksSharedState->taskHashLock, WorkerTasksSharedState->taskHashTrancheId); diff --git a/src/backend/distributed/worker/task_tracker_protocol.c b/src/backend/distributed/worker/task_tracker_protocol.c index c33df2a01..380325a89 100644 --- a/src/backend/distributed/worker/task_tracker_protocol.c +++ b/src/backend/distributed/worker/task_tracker_protocol.c @@ -347,13 +347,8 @@ CreateJobSchema(StringInfo schemaName) createSchemaStmt->schemaElts = NIL; /* actually create schema with the current user as owner */ -#if (PG_VERSION_NUM >= 100000) createSchemaStmt->authrole = &currentUserRole; CreateSchemaCommand(createSchemaStmt, queryString, -1, -1); -#else - createSchemaStmt->authrole = (Node *) &currentUserRole; - CreateSchemaCommand(createSchemaStmt, queryString); -#endif CommandCounterIncrement(); 
diff --git a/src/backend/distributed/worker/worker_data_fetch_protocol.c b/src/backend/distributed/worker/worker_data_fetch_protocol.c index 7fa65acb5..bd0b38447 100644 --- a/src/backend/distributed/worker/worker_data_fetch_protocol.c +++ b/src/backend/distributed/worker/worker_data_fetch_protocol.c @@ -48,10 +48,8 @@ #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/lsyscache.h" -#if (PG_VERSION_NUM >= 100000) #include "utils/regproc.h" #include "utils/varlena.h" -#endif /* Local functions forward declarations */ @@ -660,9 +658,7 @@ ParseTreeNode(const char *ddlCommand) { Node *parseTreeNode = ParseTreeRawStmt(ddlCommand); -#if (PG_VERSION_NUM >= 100000) parseTreeNode = ((RawStmt *) parseTreeNode)->stmt; -#endif return parseTreeNode; } @@ -874,13 +870,8 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName) Form_pg_sequence sequenceData = pg_get_sequencedef(sequenceId); int64 startValue = 0; int64 maxValue = 0; -#if (PG_VERSION_NUM >= 100000) int64 sequenceMaxValue = sequenceData->seqmax; int64 sequenceMinValue = sequenceData->seqmin; -#else - int64 sequenceMaxValue = sequenceData->max_value; - int64 sequenceMinValue = sequenceData->min_value; -#endif /* calculate min/max values that the sequence can generate in this worker */ @@ -951,11 +942,7 @@ SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg) } } -#if (PG_VERSION_NUM >= 100000) defElem = makeDefElem((char *) name, arg, -1); -#else - defElem = makeDefElem((char *) name, arg); -#endif statement->options = lappend(statement->options, defElem); } diff --git a/src/backend/distributed/worker/worker_merge_protocol.c b/src/backend/distributed/worker/worker_merge_protocol.c index 1ea23afe4..3505fd246 100644 --- a/src/backend/distributed/worker/worker_merge_protocol.c +++ b/src/backend/distributed/worker/worker_merge_protocol.c @@ -375,15 +375,11 @@ RemoveJobSchema(StringInfo schemaName) * can suppress notice messages that are typically displayed during * 
cascading deletes. */ -#if (PG_VERSION_NUM >= 100000) performDeletion(&schemaObject, DROP_CASCADE, PERFORM_DELETION_INTERNAL | PERFORM_DELETION_QUIETLY | PERFORM_DELETION_SKIP_ORIGINAL | PERFORM_DELETION_SKIP_EXTENSIONS); -#else - deleteWhatDependsOn(&schemaObject, false); -#endif CommandCounterIncrement(); @@ -423,12 +419,8 @@ CreateTaskTable(StringInfo schemaName, StringInfo relationName, createStatement = CreateStatement(relation, columnDefinitionList); -#if (PG_VERSION_NUM >= 100000) relationObject = DefineRelation(createStatement, RELKIND_RELATION, InvalidOid, NULL, NULL); -#else - relationObject = DefineRelation(createStatement, RELKIND_RELATION, InvalidOid, NULL); -#endif relationId = relationObject.objectId; Assert(relationId != InvalidOid); @@ -572,16 +564,11 @@ CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName, copyStatement = CopyStatement(relation, fullFilename->data); if (BinaryWorkerCopyFormat) { -#if (PG_VERSION_NUM >= 100000) DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary"), -1); -#else - DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary")); -#endif copyStatement->options = list_make1(copyOption); } -#if (PG_VERSION_NUM >= 100000) { ParseState *pstate = make_parsestate(NULL); pstate->p_sourcetext = queryString; @@ -590,9 +577,7 @@ CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName, free_parsestate(pstate); } -#else - DoCopy(copyStatement, queryString, &copiedRowCount); -#endif + copiedRowTotal += copiedRowCount; CommandCounterIncrement(); } diff --git a/src/backend/distributed/worker/worker_partition_protocol.c b/src/backend/distributed/worker/worker_partition_protocol.c index 07e09a175..962e4ea7c 100644 --- a/src/backend/distributed/worker/worker_partition_protocol.c +++ b/src/backend/distributed/worker/worker_partition_protocol.c @@ -853,12 +853,8 @@ FileOutputStreamFlush(FileOutputStream file) int written = 0; errno = 0; -#if (PG_VERSION_NUM >= 100000) 
written = FileWrite(file.fileDescriptor, fileBuffer->data, fileBuffer->len, PG_WAIT_IO); -#else - written = FileWrite(file.fileDescriptor, fileBuffer->data, fileBuffer->len); -#endif if (written != fileBuffer->len) { ereport(ERROR, (errcode_for_file_access(), diff --git a/src/backend/distributed/worker/worker_sql_task_protocol.c b/src/backend/distributed/worker/worker_sql_task_protocol.c index c9246038c..f0a8a1b53 100644 --- a/src/backend/distributed/worker/worker_sql_task_protocol.c +++ b/src/backend/distributed/worker/worker_sql_task_protocol.c @@ -251,11 +251,7 @@ TaskFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest) static void WriteToLocalFile(StringInfo copyData, File fileDesc) { -#if (PG_VERSION_NUM >= 100000) int bytesWritten = FileWrite(fileDesc, copyData->data, copyData->len, PG_WAIT_IO); -#else - int bytesWritten = FileWrite(fileDesc, copyData->data, copyData->len); -#endif if (bytesWritten < 0) { ereport(ERROR, (errcode_for_file_access(), diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 68c078d24..28e729975 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -39,9 +39,6 @@ extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, char *completionTag); -extern void multi_ProcessUtility9x(Node *parsetree, const char *queryString, - ProcessUtilityContext context, ParamListInfo params, - DestReceiver *dest, char *completionTag); extern void CitusProcessUtility(Node *node, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, char *completionTag); diff --git a/src/include/distributed/multi_progress.h b/src/include/distributed/multi_progress.h index 8bf5924f7..eea3624c8 100644 --- a/src/include/distributed/multi_progress.h +++ 
b/src/include/distributed/multi_progress.h @@ -17,13 +17,6 @@ #include "nodes/pg_list.h" -#if (PG_VERSION_NUM < 100000) - -/* define symbols that are undefined in PostgreSQL <= 9.6 */ -#define DSM_HANDLE_INVALID 0 -extern Datum pg_stat_get_progress_info(PG_FUNCTION_ARGS); -#endif - typedef struct ProgressMonitorData { uint64 processId; diff --git a/src/include/distributed/task_tracker.h b/src/include/distributed/task_tracker.h index 8dd6a1af5..23cb2c15b 100644 --- a/src/include/distributed/task_tracker.h +++ b/src/include/distributed/task_tracker.h @@ -105,8 +105,6 @@ typedef struct WorkerTasksSharedStateData int taskHashTrancheId; #if (PG_VERSION_NUM >= 100000) char *taskHashTrancheName; -#else - LWLockTranche taskHashLockTranche; #endif LWLock taskHashLock; bool conninfosValid; diff --git a/src/include/distributed/version_compat.h b/src/include/distributed/version_compat.h index 5d75089fa..cb2ab66ed 100644 --- a/src/include/distributed/version_compat.h +++ b/src/include/distributed/version_compat.h @@ -16,19 +16,7 @@ #include "catalog/namespace.h" #include "nodes/parsenodes.h" -#if (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 90700) - -/* Backports from PostgreSQL 10 */ -/* Accessor for the i'th attribute of tupdesc. 
*/ -#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) - -#endif - -#if (PG_VERSION_NUM < 100000) -struct QueryEnvironment; /* forward-declare to appease compiler */ -#endif - -#if (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 110000) +#if (PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000) #include "access/hash.h" #include "storage/fd.h" diff --git a/src/test/regress/expected/isolation_partitioned_copy_vs_all_1.out b/src/test/regress/expected/isolation_partitioned_copy_vs_all_1.out deleted file mode 100644 index e24461b46..000000000 --- a/src/test/regress/expected/isolation_partitioned_copy_vs_all_1.out +++ /dev/null @@ -1,6 +0,0 @@ -Parsed test spec with 2 sessions - -starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count -setup failed: ERROR: syntax error at or near "PARTITION" -LINE 3: ...itioned_copy(id integer, data text, int_data int) PARTITION ... - ^ diff --git a/src/test/regress/expected/multi_create_table_new_features_0.out b/src/test/regress/expected/multi_create_table_new_features_0.out deleted file mode 100644 index 5bec9da7a..000000000 --- a/src/test/regress/expected/multi_create_table_new_features_0.out +++ /dev/null @@ -1,35 +0,0 @@ --- --- MULTI_CREATE_TABLE_NEW_FEATURES --- --- print major version to make version-specific tests clear -SHOW server_version \gset -SELECT substring(:'server_version', '\d+') AS major_version; - major_version ---------------- - 9 -(1 row) - --- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10 --- is forbidden in distributed tables. -CREATE TABLE table_identity_col ( - id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, - payload text ); -ERROR: syntax error at or near "GENERATED" -LINE 2: id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, - ^ -SELECT master_create_distributed_table('table_identity_col', 'id', 'append'); -ERROR: relation "table_identity_col" does not exist -LINE 1: SELECT master_create_distributed_table('table_identity_col',... 
- ^ -SELECT create_distributed_table('table_identity_col', 'id'); -ERROR: relation "table_identity_col" does not exist -LINE 1: SELECT create_distributed_table('table_identity_col', 'id'); - ^ -SELECT create_distributed_table('table_identity_col', 'text'); -ERROR: relation "table_identity_col" does not exist -LINE 1: SELECT create_distributed_table('table_identity_col', 'text'... - ^ -SELECT create_reference_table('table_identity_col'); -ERROR: relation "table_identity_col" does not exist -LINE 1: SELECT create_reference_table('table_identity_col'); - ^ diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out deleted file mode 100644 index 18373f232..000000000 --- a/src/test/regress/expected/multi_explain_0.out +++ /dev/null @@ -1,1223 +0,0 @@ --- --- MULTI_EXPLAIN --- -SET citus.next_shard_id TO 570000; --- print whether we're using version > 9 to make version-specific tests clear -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; - version_above_nine --------------------- - f -(1 row) - -\a\t -SET citus.task_executor_type TO 'real-time'; -SET citus.explain_distributed_queries TO on; --- Function that parses explain output as JSON -CREATE FUNCTION explain_json(query text) -RETURNS jsonb -AS $BODY$ -DECLARE - result jsonb; -BEGIN - EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; - RETURN result; -END; -$BODY$ LANGUAGE plpgsql; --- Function that parses explain output as XML -CREATE FUNCTION explain_xml(query text) -RETURNS xml -AS $BODY$ -DECLARE - result xml; -BEGIN - EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; - RETURN result; -END; -$BODY$ LANGUAGE plpgsql; --- VACUMM related tables to ensure test outputs are stable -VACUUM ANALYZE lineitem; -VACUUM ANALYZE orders; --- Test Text format -EXPLAIN (COSTS FALSE, FORMAT TEXT) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, 
l_quantity; -Sort - Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity - -> HashAggregate - Group Key: remote_scan.l_quantity - -> Custom Scan (Citus Real-Time) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=57638 dbname=regression - -> HashAggregate - Group Key: l_quantity - -> Seq Scan on lineitem_290000 lineitem --- Test disable hash aggregate -SET enable_hashagg TO off; -EXPLAIN (COSTS FALSE, FORMAT TEXT) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -Sort - Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity - -> GroupAggregate - Group Key: remote_scan.l_quantity - -> Sort - Sort Key: remote_scan.l_quantity - -> Custom Scan (Citus Real-Time) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=57638 dbname=regression - -> HashAggregate - Group Key: l_quantity - -> Seq Scan on lineitem_290000 lineitem -SET enable_hashagg TO on; --- Test JSON format -EXPLAIN (COSTS FALSE, FORMAT JSON) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -[ - { - "Plan": { - "Node Type": "Sort", - "Parallel Aware": false, - "Sort Key": ["COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "remote_scan.l_quantity"], - "Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Hashed", - "Partial Mode": "Simple", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Group Key": ["remote_scan.l_quantity"], - "Plans": [ - { - "Node Type": "Custom Scan", - "Parent Relationship": "Outer", - "Custom Plan Provider": "Citus Real-Time", - "Parallel Aware": false, - "Distributed Query": { - "Job": { - 
"Task Count": 2, - "Tasks Shown": "One of 2", - "Tasks": [ - { - "Node": "host=localhost port=57638 dbname=regression", - "Remote Plan": [ - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Hashed", - "Partial Mode": "Simple", - "Parallel Aware": false, - "Group Key": ["l_quantity"], - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Relation Name": "lineitem_290000", - "Alias": "lineitem" - } - ] - } - } - ] - - ] - } - ] - } - } - } - ] - } - ] - } - } -] --- Validate JSON format -SELECT true AS valid FROM explain_json($$ - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); -t --- Test XML format -EXPLAIN (COSTS FALSE, FORMAT XML) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - - - - Sort - false - - COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint) - remote_scan.l_quantity - - - - Aggregate - Hashed - Simple - Outer - false - - remote_scan.l_quantity - - - - Custom Scan - Outer - Citus Real-Time - false - - - 2 - One of 2 - - - host=localhost port=57638 dbname=regression - - - - - Aggregate - Hashed - Simple - false - - l_quantity - - - - Seq Scan - Outer - false - lineitem_290000 - lineitem - - - - - - - - - - - - - - - - - --- Validate XML format -SELECT true AS valid FROM explain_xml($$ - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); -t --- Test YAML format -EXPLAIN (COSTS FALSE, FORMAT YAML) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Plan: - Node Type: "Sort" - Parallel Aware: false - Sort Key: - - "COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)" - - 
"remote_scan.l_quantity" - Plans: - - Node Type: "Aggregate" - Strategy: "Hashed" - Partial Mode: "Simple" - Parent Relationship: "Outer" - Parallel Aware: false - Group Key: - - "remote_scan.l_quantity" - Plans: - - Node Type: "Custom Scan" - Parent Relationship: "Outer" - Custom Plan Provider: "Citus Real-Time" - Parallel Aware: false - Distributed Query: - Job: - Task Count: 2 - Tasks Shown: "One of 2" - Tasks: - - Node: "host=localhost port=57638 dbname=regression" - Remote Plan: - - Plan: - Node Type: "Aggregate" - Strategy: "Hashed" - Partial Mode: "Simple" - Parallel Aware: false - Group Key: - - "l_quantity" - Plans: - - Node Type: "Seq Scan" - Parent Relationship: "Outer" - Parallel Aware: false - Relation Name: "lineitem_290000" - Alias: "lineitem" - --- Test Text format -EXPLAIN (COSTS FALSE, FORMAT TEXT) - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -Sort - Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity - -> HashAggregate - Group Key: remote_scan.l_quantity - -> Custom Scan (Citus Real-Time) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=57638 dbname=regression - -> HashAggregate - Group Key: l_quantity - -> Seq Scan on lineitem_290000 lineitem --- Test verbose -EXPLAIN (COSTS FALSE, VERBOSE TRUE) - SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; -Aggregate - Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2"))) - -> Custom Scan (Citus Real-Time) - Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2" - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=57638 dbname=regression - -> Aggregate - Output: sum(l_quantity), sum(l_quantity), count(l_quantity) - -> Seq Scan on public.lineitem_290000 lineitem - Output: 
l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment --- Test join -EXPLAIN (COSTS FALSE) - SELECT * FROM lineitem - JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5.0 - ORDER BY l_quantity LIMIT 10; -Limit - -> Sort - Sort Key: remote_scan.l_quantity - -> Custom Scan (Citus Real-Time) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=57638 dbname=regression - -> Limit - -> Sort - Sort Key: lineitem.l_quantity - -> Hash Join - Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) - -> Seq Scan on lineitem_290000 lineitem - Filter: (l_quantity < 5.0) - -> Hash - -> Seq Scan on orders_290002 orders --- Test insert -EXPLAIN (COSTS FALSE) - INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0); -Custom Scan (Citus Router) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57638 dbname=regression - -> Insert on lineitem_290000 citus_table_alias - -> Values Scan on "*VALUES*" --- Test update -EXPLAIN (COSTS FALSE) - UPDATE lineitem - SET l_suppkey = 12 - WHERE l_orderkey = 1 AND l_partkey = 0; -Custom Scan (Citus Router) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57638 dbname=regression - -> Update on lineitem_290000 lineitem - -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem - Index Cond: (l_orderkey = 1) - Filter: (l_partkey = 0) --- Test delete -EXPLAIN (COSTS FALSE) - DELETE FROM lineitem - WHERE l_orderkey = 1 AND l_partkey = 0; -Custom Scan (Citus Router) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57638 dbname=regression - -> Delete on lineitem_290000 lineitem - -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem - Index Cond: (l_orderkey = 1) - Filter: (l_partkey = 0) --- Test zero-shard update -EXPLAIN (COSTS FALSE) - UPDATE lineitem - SET l_suppkey = 12 - 
WHERE l_orderkey = 1 AND l_orderkey = 0; -Custom Scan (Citus Router) - Task Count: 0 - Tasks Shown: All --- Test zero-shard delete -EXPLAIN (COSTS FALSE) - DELETE FROM lineitem - WHERE l_orderkey = 1 AND l_orderkey = 0; -Custom Scan (Citus Router) - Task Count: 0 - Tasks Shown: All --- Test single-shard SELECT -EXPLAIN (COSTS FALSE) - SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; -Custom Scan (Citus Router) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem - Index Cond: (l_orderkey = 5) -SELECT true AS valid FROM explain_xml($$ - SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); -t -SELECT true AS valid FROM explain_json($$ - SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); -t --- Test CREATE TABLE ... AS -EXPLAIN (COSTS FALSE) - CREATE TABLE explain_result AS - SELECT * FROM lineitem; -Custom Scan (Citus Real-Time) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=57638 dbname=regression - -> Seq Scan on lineitem_290000 lineitem --- Test having -EXPLAIN (COSTS FALSE, VERBOSE TRUE) - SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem - HAVING sum(l_quantity) > 100; -Aggregate - Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2"))) - Filter: (sum(remote_scan.worker_column_4) > '100'::numeric) - -> Custom Scan (Citus Real-Time) - Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4 - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=57638 dbname=regression - -> Aggregate - Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) - -> Seq Scan on public.lineitem_290000 lineitem - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, 
l_receiptdate, l_shipinstruct, l_shipmode, l_comment --- Test having without aggregate -EXPLAIN (COSTS FALSE, VERBOSE TRUE) - SELECT l_quantity FROM lineitem - GROUP BY l_quantity - HAVING l_quantity > (100 * random()); -HashAggregate - Output: remote_scan.l_quantity - Group Key: remote_scan.l_quantity - Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random())) - -> Custom Scan (Citus Real-Time) - Output: remote_scan.l_quantity, remote_scan.worker_column_2 - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=57638 dbname=regression - -> HashAggregate - Output: l_quantity, l_quantity - Group Key: lineitem.l_quantity - -> Seq Scan on public.lineitem_290000 lineitem - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment --- Subquery pushdown tests with explain -EXPLAIN (COSTS OFF) -SELECT - avg(array_length(events, 1)) AS event_average -FROM - (SELECT - tenant_id, - user_id, - array_agg(event_type ORDER BY event_time) AS events - FROM - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - event_type, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type IN ('click', 'submit', 'pay')) AS subquery - GROUP BY - tenant_id, - user_id) AS subquery; -Aggregate - -> Custom Scan (Citus Real-Time) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=57637 dbname=regression - -> Aggregate - -> GroupAggregate - Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) - -> Sort - Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) - -> Hash Join - Hash Cond: 
(users.composite_id = events.composite_id) - -> Seq Scan on users_1400289 users - Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) - -> Hash - -> Seq Scan on events_1400285 events - Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[])) --- Union and left join subquery pushdown -EXPLAIN (COSTS OFF) -SELECT - avg(array_length(events, 1)) AS event_average, - hasdone -FROM - (SELECT - subquery_1.tenant_id, - subquery_1.user_id, - array_agg(event ORDER BY event_time) AS events, - COALESCE(hasdone, 'Has not done paying') AS hasdone - FROM - ( - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - (users.composite_id) as composite_id, - 'action=>1'AS event, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'click') - UNION - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - (users.composite_id) as composite_id, - 'action=>2'AS event, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'submit') - ) AS subquery_1 - LEFT JOIN - (SELECT - DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id, - (composite_id).tenant_id, - (composite_id).user_id, - 'Has done paying'::TEXT AS hasdone - FROM - events - WHERE - events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'pay') AS subquery_2 - ON - subquery_1.composite_id = subquery_2.composite_id - GROUP BY - 
subquery_1.tenant_id, - subquery_1.user_id, - hasdone) AS subquery_top -GROUP BY - hasdone; -HashAggregate - Group Key: remote_scan.hasdone - -> Custom Scan (Citus Real-Time) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=57637 dbname=regression - -> GroupAggregate - Group Key: subquery_top.hasdone - -> Sort - Sort Key: subquery_top.hasdone - -> Subquery Scan on subquery_top - -> GroupAggregate - Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone - -> Sort - Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone - -> Hash Left Join - Hash Cond: (users.composite_id = subquery_2.composite_id) - -> HashAggregate - Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time - -> Append - -> Hash Join - Hash Cond: (users.composite_id = events.composite_id) - -> Seq Scan on users_1400289 users - Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) - -> Hash - -> Seq Scan on events_1400285 events - Filter: ((event_type)::text = 'click'::text) - -> Hash Join - Hash Cond: (users_1.composite_id = events_1.composite_id) - -> Seq Scan on users_1400289 users_1 - Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) - -> Hash - -> Seq Scan on events_1400285 events_1 - Filter: ((event_type)::text = 'submit'::text) - -> Hash - -> Subquery Scan on subquery_2 - -> Unique - -> Sort - Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id) - -> Seq Scan on events_1400285 events_2 - Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text)) --- Union, left join and 
having subquery pushdown -EXPLAIN (COSTS OFF) - SELECT - avg(array_length(events, 1)) AS event_average, - count_pay - FROM ( - SELECT - subquery_1.tenant_id, - subquery_1.user_id, - array_agg(event ORDER BY event_time) AS events, - COALESCE(count_pay, 0) AS count_pay - FROM - ( - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - (users.composite_id), - 'action=>1'AS event, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'click') - UNION - (SELECT - (users.composite_id).tenant_id, - (users.composite_id).user_id, - (users.composite_id), - 'action=>2'AS event, - events.event_time - FROM - users, - events - WHERE - (users.composite_id) = (events.composite_id) AND - users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'submit') - ) AS subquery_1 - LEFT JOIN - (SELECT - (composite_id).tenant_id, - (composite_id).user_id, - composite_id, - COUNT(*) AS count_pay - FROM - events - WHERE - events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND - event_type = 'pay' - GROUP BY - composite_id - HAVING - COUNT(*) > 2) AS subquery_2 - ON - subquery_1.composite_id = subquery_2.composite_id - GROUP BY - subquery_1.tenant_id, - subquery_1.user_id, - count_pay) AS subquery_top -WHERE - array_ndims(events) > 0 -GROUP BY - count_pay -ORDER BY - count_pay; -ERROR: bogus varattno for OUTER_VAR var: 3 --- Lateral join subquery pushdown --- set subquery_pushdown due to limit in the query -SET citus.subquery_pushdown to ON; -EXPLAIN (COSTS OFF) -SELECT - tenant_id, - user_id, - user_lastseen, - event_array -FROM - (SELECT - tenant_id, - 
user_id, - max(lastseen) as user_lastseen, - array_agg(event_type ORDER BY event_time) AS event_array - FROM - (SELECT - (composite_id).tenant_id, - (composite_id).user_id, - composite_id, - lastseen - FROM - users - WHERE - composite_id >= '(1, -9223372036854775808)'::user_composite_type AND - composite_id <= '(1, 9223372036854775807)'::user_composite_type - ORDER BY - lastseen DESC - LIMIT - 10 - ) AS subquery_top - LEFT JOIN LATERAL - (SELECT - event_type, - event_time - FROM - events - WHERE - (composite_id) = subquery_top.composite_id - ORDER BY - event_time DESC - LIMIT - 99) AS subquery_lateral - ON - true - GROUP BY - tenant_id, - user_id - ) AS shard_union -ORDER BY - user_lastseen DESC -LIMIT - 10; -Limit - -> Sort - Sort Key: remote_scan.user_lastseen DESC - -> Custom Scan (Citus Real-Time) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=57637 dbname=regression - -> Limit - -> Sort - Sort Key: (max(users.lastseen)) DESC - -> GroupAggregate - Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) - -> Sort - Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) - -> Nested Loop Left Join - -> Limit - -> Sort - Sort Key: users.lastseen DESC - -> Seq Scan on users_1400289 users - Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) - -> Limit - -> Sort - Sort Key: events.event_time DESC - -> Seq Scan on events_1400285 events - Filter: (composite_id = users.composite_id) -RESET citus.subquery_pushdown; --- Test all tasks output -SET citus.explain_all_tasks TO on; -EXPLAIN (COSTS FALSE) - SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -Aggregate - -> Custom Scan (Citus Real-Time) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Aggregate - -> Seq Scan on lineitem_290001 lineitem - Filter: (l_orderkey > 9030) -SELECT true AS valid 
FROM explain_xml($$ - SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); -t -SELECT true AS valid FROM explain_json($$ - SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); -t - --- Test multi shard update -EXPLAIN (COSTS FALSE) - UPDATE lineitem_hash_part - SET l_suppkey = 12; -Custom Scan (Citus Router) - Task Count: 4 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Update on lineitem_hash_part_360041 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part - -> Task - Node: host=localhost port=57638 dbname=regression - -> Update on lineitem_hash_part_360042 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part - -> Task - Node: host=localhost port=57637 dbname=regression - -> Update on lineitem_hash_part_360043 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part - -> Task - Node: host=localhost port=57638 dbname=regression - -> Update on lineitem_hash_part_360044 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part - -EXPLAIN (COSTS FALSE) - UPDATE lineitem_hash_part - SET l_suppkey = 12 - WHERE l_orderkey = 1 OR l_orderkey = 3; -Custom Scan (Citus Router) - Task Count: 2 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Update on lineitem_hash_part_360041 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part - Filter: ((l_orderkey = 1) OR (l_orderkey = 3)) - -> Task - Node: host=localhost port=57638 dbname=regression - -> Update on lineitem_hash_part_360042 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part - Filter: ((l_orderkey = 1) OR (l_orderkey = 3)) --- Test multi shard delete -EXPLAIN (COSTS FALSE) - DELETE FROM lineitem_hash_part; -Custom Scan (Citus Router) - Task Count: 4 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Delete on 
lineitem_hash_part_360041 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part - -> Task - Node: host=localhost port=57638 dbname=regression - -> Delete on lineitem_hash_part_360042 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part - -> Task - Node: host=localhost port=57637 dbname=regression - -> Delete on lineitem_hash_part_360043 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part - -> Task - Node: host=localhost port=57638 dbname=regression - -> Delete on lineitem_hash_part_360044 lineitem_hash_part - -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part -SET citus.explain_all_tasks TO off; --- Test update with subquery -EXPLAIN (COSTS FALSE) - UPDATE lineitem_hash_part - SET l_suppkey = 12 - FROM orders_hash_part - WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey; -Custom Scan (Citus Router) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=57637 dbname=regression - -> Update on lineitem_hash_part_360041 lineitem_hash_part - -> Hash Join - Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey) - -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part - -> Hash - -> Seq Scan on orders_hash_part_360045 orders_hash_part --- Test delete with subquery -EXPLAIN (COSTS FALSE) - DELETE FROM lineitem_hash_part - USING orders_hash_part - WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey; -Custom Scan (Citus Router) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=57637 dbname=regression - -> Delete on lineitem_hash_part_360041 lineitem_hash_part - -> Hash Join - Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey) - -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part - -> Hash - -> Seq Scan on orders_hash_part_360045 orders_hash_part --- Test track tracker -SET citus.task_executor_type TO 'task-tracker'; -EXPLAIN (COSTS FALSE) - SELECT 
avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -Aggregate - -> Custom Scan (Citus Task-Tracker) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Aggregate - -> Seq Scan on lineitem_290001 lineitem - Filter: (l_orderkey > 9030) --- Test re-partition join -EXPLAIN (COSTS FALSE) - SELECT count(*) - FROM lineitem, orders, customer_append, supplier_single_shard - WHERE l_orderkey = o_orderkey - AND o_custkey = c_custkey - AND l_suppkey = s_suppkey; -Aggregate - -> Custom Scan (Citus Task-Tracker) - Task Count: 1 - Tasks Shown: None, not supported for re-partition queries - -> MapMergeJob - Map Task Count: 1 - Merge Task Count: 1 - -> MapMergeJob - Map Task Count: 2 - Merge Task Count: 1 -EXPLAIN (COSTS FALSE, FORMAT JSON) - SELECT count(*) - FROM lineitem, orders, customer_append, supplier_single_shard - WHERE l_orderkey = o_orderkey - AND o_custkey = c_custkey - AND l_suppkey = s_suppkey; -[ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Simple", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Custom Scan", - "Parent Relationship": "Outer", - "Custom Plan Provider": "Citus Task-Tracker", - "Parallel Aware": false, - "Distributed Query": { - "Job": { - "Task Count": 1, - "Tasks Shown": "None, not supported for re-partition queries", - "Depended Jobs": [ - { - "Map Task Count": 1, - "Merge Task Count": 1, - "Depended Jobs": [ - { - "Map Task Count": 2, - "Merge Task Count": 1 - } - ] - } - ] - } - } - } - ] - } - } -] -SELECT true AS valid FROM explain_json($$ - SELECT count(*) - FROM lineitem, orders, customer_append, supplier_single_shard - WHERE l_orderkey = o_orderkey - AND o_custkey = c_custkey - AND l_suppkey = s_suppkey$$); -t -EXPLAIN (COSTS FALSE, FORMAT XML) - SELECT count(*) - FROM lineitem, orders, customer_append, supplier_single_shard - WHERE l_orderkey = o_orderkey - AND o_custkey = c_custkey - AND l_suppkey = s_suppkey; - - - - Aggregate - 
Plain - Simple - false - - - Custom Scan - Outer - Citus Task-Tracker - false - - - 1 - None, not supported for re-partition queries - - - 1 - 1 - - - 2 - 1 - - - - - - - - - - - -SELECT true AS valid FROM explain_xml($$ - SELECT count(*) - FROM lineitem, orders, customer_append, supplier - WHERE l_orderkey = o_orderkey - AND o_custkey = c_custkey - AND l_suppkey = s_suppkey$$); -t --- make sure that EXPLAIN works without --- problems for queries that inlvolves only --- reference tables -SELECT true AS valid FROM explain_xml($$ - SELECT count(*) - FROM nation - WHERE n_name = 'CHINA'$$); -t -SELECT true AS valid FROM explain_xml($$ - SELECT count(*) - FROM nation, supplier - WHERE nation.n_nationkey = supplier.s_nationkey$$); -t -EXPLAIN (COSTS FALSE, FORMAT YAML) - SELECT count(*) - FROM lineitem, orders, customer, supplier_single_shard - WHERE l_orderkey = o_orderkey - AND o_custkey = c_custkey - AND l_suppkey = s_suppkey; -- Plan: - Node Type: "Aggregate" - Strategy: "Plain" - Partial Mode: "Simple" - Parallel Aware: false - Plans: - - Node Type: "Custom Scan" - Parent Relationship: "Outer" - Custom Plan Provider: "Citus Task-Tracker" - Parallel Aware: false - Distributed Query: - Job: - Task Count: 1 - Tasks Shown: "None, not supported for re-partition queries" - Depended Jobs: - - Map Task Count: 2 - Merge Task Count: 1 --- test parallel aggregates -SET parallel_setup_cost=0; -SET parallel_tuple_cost=0; -SET min_parallel_relation_size=0; -SET min_parallel_table_scan_size=0; -ERROR: unrecognized configuration parameter "min_parallel_table_scan_size" -SET max_parallel_workers_per_gather=4; --- ensure local plans display correctly -CREATE TABLE lineitem_clone (LIKE lineitem); -EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone; -Finalize Aggregate - -> Gather - Workers Planned: 3 - -> Partial Aggregate - -> Parallel Seq Scan on lineitem_clone --- ensure distributed plans don't break -EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem; 
-Aggregate - -> Custom Scan (Citus Task-Tracker) - Task Count: 2 - Tasks Shown: One of 2 - -> Task - Node: host=localhost port=57638 dbname=regression - -> Aggregate - -> Seq Scan on lineitem_290000 lineitem --- ensure EXPLAIN EXECUTE doesn't crash -PREPARE task_tracker_query AS - SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query; -Aggregate - -> Custom Scan (Citus Task-Tracker) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Aggregate - -> Seq Scan on lineitem_290001 lineitem - Filter: (l_orderkey > 9030) -SET citus.task_executor_type TO 'real-time'; -PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; -EXPLAIN EXECUTE router_executor_query; -Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..13.60 rows=4 width=5) - Index Cond: (l_orderkey = 5) -PREPARE real_time_executor_query AS - SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query; -Aggregate - -> Custom Scan (Citus Real-Time) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Aggregate - -> Seq Scan on lineitem_290001 lineitem - Filter: (l_orderkey > 9030) --- EXPLAIN EXECUTE of parametrized prepared statements is broken, but --- at least make sure to fail without crashing -PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1; -EXPLAIN EXECUTE router_executor_query_param(5); -Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem 
(cost=0.28..13.60 rows=4 width=5) - Index Cond: (l_orderkey = 5) --- test explain in a transaction with alter table to test we use right connections -BEGIN; -CREATE TABLE explain_table(id int); -SELECT create_distributed_table('explain_table', 'id'); - -ALTER TABLE explain_table ADD COLUMN value int; -ROLLBACK; --- test explain with local INSERT ... SELECT -EXPLAIN (COSTS OFF) -INSERT INTO lineitem_hash_part -SELECT o_orderkey FROM orders_hash_part LIMIT 3; -Custom Scan (Citus INSERT ... SELECT via coordinator) - -> Limit - -> Custom Scan (Citus Real-Time) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=57638 dbname=regression - -> Limit - -> Seq Scan on orders_hash_part_360045 orders_hash_part -SELECT true AS valid FROM explain_json($$ - INSERT INTO lineitem_hash_part (l_orderkey) - SELECT o_orderkey FROM orders_hash_part LIMIT 3; -$$); -t -EXPLAIN (COSTS OFF) -INSERT INTO lineitem_hash_part (l_orderkey, l_quantity) -SELECT o_orderkey, 5 FROM orders_hash_part LIMIT 3; -Custom Scan (Citus INSERT ... SELECT via coordinator) - -> Limit - -> Custom Scan (Citus Real-Time) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=57638 dbname=regression - -> Limit - -> Seq Scan on orders_hash_part_360045 orders_hash_part -EXPLAIN (COSTS OFF) -INSERT INTO lineitem_hash_part (l_orderkey) -SELECT s FROM generate_series(1,5) s; -Custom Scan (Citus INSERT ... SELECT via coordinator) - -> Function Scan on generate_series s -EXPLAIN (COSTS OFF) -WITH cte1 AS (SELECT s FROM generate_series(1,10) s) -INSERT INTO lineitem_hash_part -WITH cte1 AS (SELECT * FROM cte1 LIMIT 5) -SELECT s FROM cte1; -Custom Scan (Citus INSERT ... 
SELECT via coordinator) - -> CTE Scan on cte1 - CTE cte1 - -> Function Scan on generate_series s - CTE cte1 - -> Limit - -> CTE Scan on cte1 cte1_1 -EXPLAIN (COSTS OFF) -INSERT INTO lineitem_hash_part -( SELECT s FROM generate_series(1,5) s) UNION -( SELECT s FROM generate_series(5,10) s); -Custom Scan (Citus INSERT ... SELECT via coordinator) - -> HashAggregate - Group Key: s.s - -> Append - -> Function Scan on generate_series s - -> Function Scan on generate_series s_1 --- explain with recursive planning -EXPLAIN (COSTS OFF, VERBOSE true) -WITH keys AS ( - SELECT DISTINCT l_orderkey FROM lineitem_hash_part -), -series AS ( - SELECT s FROM generate_series(1,10) s -) -SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey) -ORDER BY s; -Custom Scan (Citus Router) - Output: remote_scan.l_orderkey - -> Distributed Subplan 57_1 - -> HashAggregate - Output: remote_scan.l_orderkey - Group Key: remote_scan.l_orderkey - -> Custom Scan (Citus Real-Time) - Output: remote_scan.l_orderkey - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=57637 dbname=regression - -> HashAggregate - Output: l_orderkey - Group Key: lineitem_hash_part.l_orderkey - -> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part - Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment - -> Distributed Subplan 57_2 - -> Function Scan on pg_catalog.generate_series s - Output: s - Function Call: generate_series(1, 10) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57638 dbname=regression - -> Merge Join - Output: intermediate_result_1.l_orderkey, intermediate_result.s - Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey) - -> Sort - Output: intermediate_result.s - Sort Key: intermediate_result.s - -> Function Scan on pg_catalog.read_intermediate_result 
intermediate_result - Output: intermediate_result.s - Function Call: read_intermediate_result('57_2'::text, 'binary'::citus_copy_format) - -> Sort - Output: intermediate_result_1.l_orderkey - Sort Key: intermediate_result_1.l_orderkey - -> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1 - Output: intermediate_result_1.l_orderkey - Function Call: read_intermediate_result('57_1'::text, 'binary'::citus_copy_format) -SELECT true AS valid FROM explain_json($$ - WITH result AS ( - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity - ), - series AS ( - SELECT s FROM generate_series(1,10) s - ) - SELECT * FROM result JOIN series ON (s = count_quantity) JOIN orders_hash_part ON (s = o_orderkey) -$$); -t -SELECT true AS valid FROM explain_xml($$ - WITH result AS ( - SELECT l_quantity, count(*) count_quantity FROM lineitem - GROUP BY l_quantity ORDER BY count_quantity, l_quantity - ), - series AS ( - SELECT s FROM generate_series(1,10) s - ) - SELECT * FROM result JOIN series ON (s = l_quantity) JOIN orders_hash_part ON (s = o_orderkey) -$$); -t diff --git a/src/test/regress/expected/multi_index_statements_1.out b/src/test/regress/expected/multi_index_statements_1.out deleted file mode 100644 index 8628a1010..000000000 --- a/src/test/regress/expected/multi_index_statements_1.out +++ /dev/null @@ -1,304 +0,0 @@ --- --- MULTI_INDEX_STATEMENTS --- --- Check that we can run CREATE INDEX and DROP INDEX statements on distributed --- tables. 
-SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int AS major_version; - major_version ---------------- - 9 -(1 row) - --- --- CREATE TEST TABLES --- -SET citus.next_shard_id TO 102080; -CREATE TABLE index_test_range(a int, b int, c int); -SELECT create_distributed_table('index_test_range', 'a', 'range'); - create_distributed_table --------------------------- - -(1 row) - -SELECT master_create_empty_shard('index_test_range'); - master_create_empty_shard ---------------------------- - 102080 -(1 row) - -SELECT master_create_empty_shard('index_test_range'); - master_create_empty_shard ---------------------------- - 102081 -(1 row) - -SET citus.shard_count TO 8; -SET citus.shard_replication_factor TO 2; -CREATE TABLE index_test_hash(a int, b int, c int); -SELECT create_distributed_table('index_test_hash', 'a', 'hash'); - create_distributed_table --------------------------- - -(1 row) - -CREATE TABLE index_test_append(a int, b int, c int); -SELECT create_distributed_table('index_test_append', 'a', 'append'); - create_distributed_table --------------------------- - -(1 row) - -SELECT master_create_empty_shard('index_test_append'); - master_create_empty_shard ---------------------------- - 102090 -(1 row) - -SELECT master_create_empty_shard('index_test_append'); - master_create_empty_shard ---------------------------- - 102091 -(1 row) - --- --- CREATE INDEX --- --- Verify that we can create different types of indexes -CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey); -CREATE INDEX lineitem_partkey_desc_index ON lineitem (l_partkey DESC); -CREATE INDEX lineitem_partial_index ON lineitem (l_shipdate) - WHERE l_shipdate < '1995-01-01'; -CREATE INDEX lineitem_colref_index ON lineitem (record_ne(lineitem.*, NULL)); -SET client_min_messages = ERROR; -- avoid version dependant warning about WAL -CREATE INDEX lineitem_orderkey_hash_index ON lineitem USING hash (l_partkey); -CREATE UNIQUE INDEX index_test_range_index_a ON index_test_range(a); 
-CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range(a,b); -CREATE UNIQUE INDEX index_test_hash_index_a ON index_test_hash(a); -CREATE UNIQUE INDEX index_test_hash_index_a_b ON index_test_hash(a,b); -CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON index_test_hash(a,b) WHERE c IS NOT NULL; -CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range(a,b) WHERE c IS NOT NULL; -CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON index_test_hash(a) INCLUDE (b,c); -ERROR: syntax error at or near "INCLUDE" -LINE 1: ...index_test_hash_index_a_b_c ON index_test_hash(a) INCLUDE (b... - ^ -RESET client_min_messages; --- Verify that we handle if not exists statements correctly -CREATE INDEX lineitem_orderkey_index on lineitem(l_orderkey); -ERROR: relation "lineitem_orderkey_index" already exists -CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on lineitem(l_orderkey); -NOTICE: relation "lineitem_orderkey_index" already exists, skipping -CREATE INDEX IF NOT EXISTS lineitem_orderkey_index_new on lineitem(l_orderkey); --- Verify if not exists behavior with an index with same name on a different table -CREATE INDEX lineitem_orderkey_index on index_test_hash(a); -ERROR: relation "lineitem_orderkey_index" already exists -CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on index_test_hash(a); -NOTICE: relation "lineitem_orderkey_index" already exists, skipping --- Verify that we can create indexes concurrently -CREATE INDEX CONCURRENTLY lineitem_concurrently_index ON lineitem (l_orderkey); --- Verify that we warn out on CLUSTER command for distributed tables and no parameter -CLUSTER index_test_hash USING index_test_hash_index_a; -WARNING: not propagating CLUSTER command to worker nodes -CLUSTER; -WARNING: not propagating CLUSTER command to worker nodes --- Verify that no-name local CREATE INDEX CONCURRENTLY works -CREATE TABLE local_table (id integer, name text); -CREATE INDEX CONCURRENTLY local_table_index ON local_table(id); --- Vefify 
we don't warn out on CLUSTER command for local tables -CLUSTER local_table USING local_table_index; -DROP TABLE local_table; --- Verify that all indexes got created on the master node and one of the workers -SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef -------------+------------------+------------------------------------+------------+---------------------------------------------------------------------------------------------------------------------------- - public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a) - public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b) - public | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON public.index_test_hash USING btree (a, b) WHERE (c IS NOT NULL) - public | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON public.index_test_range USING btree (a) - public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON public.index_test_range USING btree (a, b) - public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON public.index_test_range USING btree (a, b) WHERE (c IS NOT NULL) - public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON public.lineitem USING btree (record_ne(lineitem.*, NULL::record)) - public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey) - public | 
lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON public.lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date) - public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC) - public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber) - public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) -(15 rows) - -\c - - - :worker_1_port -SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1); - count -------- - 9 -(1 row) - -SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%'; - count -------- - 24 -(1 row) - -SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%'; - count -------- - 0 -(1 row) - -\c - - - :master_port --- Verify that we error out on unsupported statement types -CREATE UNIQUE INDEX try_index ON lineitem (l_orderkey); -ERROR: creating unique indexes on append-partitioned tables is currently unsupported -CREATE INDEX try_index ON lineitem (l_orderkey) TABLESPACE newtablespace; -ERROR: specifying tablespaces with CREATE INDEX statements is currently unsupported -CREATE UNIQUE INDEX try_unique_range_index ON index_test_range(b); -ERROR: creating unique indexes on non-partition columns is currently unsupported -CREATE UNIQUE INDEX try_unique_range_index_partial ON index_test_range(b) WHERE c IS NOT NULL; -ERROR: 
creating unique indexes on non-partition columns is currently unsupported -CREATE UNIQUE INDEX try_unique_hash_index ON index_test_hash(b); -ERROR: creating unique indexes on non-partition columns is currently unsupported -CREATE UNIQUE INDEX try_unique_hash_index_partial ON index_test_hash(b) WHERE c IS NOT NULL; -ERROR: creating unique indexes on non-partition columns is currently unsupported -CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(b); -ERROR: creating unique indexes on append-partitioned tables is currently unsupported -CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(a); -ERROR: creating unique indexes on append-partitioned tables is currently unsupported -CREATE UNIQUE INDEX try_unique_append_index_a_b ON index_test_append(a,b); -ERROR: creating unique indexes on append-partitioned tables is currently unsupported --- Verify that we error out in case of postgres errors on supported statement --- types. -CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey); -ERROR: relation "lineitem_orderkey_index" already exists -CREATE INDEX try_index ON lineitem USING gist (l_orderkey); -ERROR: data type bigint has no default operator class for access method "gist" -HINT: You must specify an operator class for the index or define a default operator class for the data type. 
-CREATE INDEX try_index ON lineitem (non_existent_column); -ERROR: column "non_existent_column" does not exist -CREATE INDEX ON lineitem (l_orderkey); -ERROR: creating index without a name on a distributed table is currently unsupported --- Verify that none of failed indexes got created on the master node -SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef -------------+------------------+------------------------------------+------------+---------------------------------------------------------------------------------------------------------------------------- - public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a) - public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b) - public | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON public.index_test_hash USING btree (a, b) WHERE (c IS NOT NULL) - public | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON public.index_test_range USING btree (a) - public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON public.index_test_range USING btree (a, b) - public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON public.index_test_range USING btree (a, b) WHERE (c IS NOT NULL) - public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON public.lineitem USING btree (record_ne(lineitem.*, NULL::record)) - public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_orderkey_hash_index 
| | CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey) - public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON public.lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date) - public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC) - public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber) - public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) -(15 rows) - --- --- DROP INDEX --- --- Verify that we can't drop multiple indexes in a single command -DROP INDEX lineitem_orderkey_index, lineitem_partial_index; -ERROR: cannot drop multiple distributed objects in a single command -HINT: Try dropping each object in a separate DROP command. 
--- Verify that we can succesfully drop indexes -DROP INDEX lineitem_orderkey_index; -DROP INDEX lineitem_orderkey_index_new; -DROP INDEX lineitem_partkey_desc_index; -DROP INDEX lineitem_partial_index; -DROP INDEX lineitem_colref_index; --- Verify that we handle if exists statements correctly -DROP INDEX non_existent_index; -ERROR: index "non_existent_index" does not exist -DROP INDEX IF EXISTS non_existent_index; -NOTICE: index "non_existent_index" does not exist, skipping -DROP INDEX IF EXISTS lineitem_orderkey_hash_index; -DROP INDEX lineitem_orderkey_hash_index; -ERROR: index "lineitem_orderkey_hash_index" does not exist -DROP INDEX index_test_range_index_a; -DROP INDEX index_test_range_index_a_b; -DROP INDEX index_test_range_index_a_b_partial; -DROP INDEX index_test_hash_index_a; -DROP INDEX index_test_hash_index_a_b; -DROP INDEX index_test_hash_index_a_b_partial; --- Verify that we can drop indexes concurrently -DROP INDEX CONCURRENTLY lineitem_concurrently_index; --- Verify that all the indexes are dropped from the master and one worker node. --- As there's a primary key, so exclude those from this check. 
-SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; - indrelid | indexrelid -----------+------------ -(0 rows) - -SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef -------------+-----------+-----------+------------+---------- -(0 rows) - -\c - - - :worker_1_port -SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; - indrelid | indexrelid -----------+------------ -(0 rows) - -SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef -------------+-----------+-----------+------------+---------- -(0 rows) - --- create index that will conflict with master operations -CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b); -\c - - - :master_port --- should fail because worker index already exists -CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); -ERROR: CONCURRENTLY-enabled index command failed -DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. --- the failure results in an INVALID index -SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; - Index Valid? 
--------------- - f -(1 row) - --- we can clean it up and recreate with an DROP IF EXISTS -DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; -CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); -SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; - Index Valid? --------------- - t -(1 row) - -\c - - - :worker_1_port --- now drop shard index to test partial master DROP failure -DROP INDEX CONCURRENTLY ith_b_idx_102089; -\c - - - :master_port -DROP INDEX CONCURRENTLY ith_b_idx; -ERROR: CONCURRENTLY-enabled index command failed -DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. --- the failure results in an INVALID index -SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; - Index Valid? --------------- - f -(1 row) - --- final clean up -DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; --- Drop created tables -DROP TABLE index_test_range; -DROP TABLE index_test_hash; -DROP TABLE index_test_append; diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning_1.out b/src/test/regress/expected/multi_null_minmax_value_pruning_1.out deleted file mode 100644 index 2ceb92e08..000000000 --- a/src/test/regress/expected/multi_null_minmax_value_pruning_1.out +++ /dev/null @@ -1,303 +0,0 @@ --- --- MULTI_NULL_MINMAX_VALUE_PRUNING --- --- This test checks that we can handle null min/max values in shard statistics --- and that we don't partition or join prune shards that have null values. 
-SET citus.next_shard_id TO 760000; --- print major version number for version-specific tests -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int AS server_version; - server_version ----------------- - 9 -(1 row) - -SET client_min_messages TO DEBUG2; -SET citus.explain_all_tasks TO on; --- to avoid differing explain output - executor doesn't matter, --- because were testing pruning here. -SET citus.task_executor_type TO 'real-time'; --- Change configuration to treat lineitem and orders tables as large -SET citus.log_multi_join_order to true; -SET citus.enable_repartition_joins to ON; -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; - shardminvalue | shardmaxvalue ----------------+--------------- - 1 | 5986 -(1 row) - -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; - shardminvalue | shardmaxvalue ----------------+--------------- - 8997 | 14947 -(1 row) - --- Check that partition and join pruning works when min/max values exist --- Adding l_orderkey = 1 to make the query not router executable -EXPLAIN (COSTS FALSE) -SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; -LOG: join order: [ "lineitem" ] - QUERY PLAN ------------------------------------------------------------------------ - Custom Scan (Citus Real-Time) - Task Count: 2 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Bitmap Heap Scan on lineitem_290001 lineitem - Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1)) - -> BitmapOr - -> Bitmap Index Scan on lineitem_pkey_290001 - Index Cond: (l_orderkey = 9030) - -> Bitmap Index Scan on lineitem_pkey_290001 - Index Cond: (l_orderkey = 1) - -> Task - Node: host=localhost port=57638 dbname=regression - -> Bitmap Heap Scan on lineitem_290000 lineitem - Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1)) - -> BitmapOr - -> Bitmap Index Scan on lineitem_pkey_290000 - Index Cond: 
(l_orderkey = 9030) - -> Bitmap Index Scan on lineitem_pkey_290000 - Index Cond: (l_orderkey = 1) -(21 rows) - -EXPLAIN (COSTS FALSE) -SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders - WHERE l_orderkey = o_orderkey; -LOG: join order: [ "lineitem" ][ local partition join "orders" ] -DEBUG: join prunable for intervals [1,5986] and [8997,14947] -DEBUG: join prunable for intervals [8997,14947] and [1,5986] - QUERY PLAN ------------------------------------------------------------------------------------------------------- - Aggregate - -> Custom Scan (Citus Real-Time) - Task Count: 2 - Tasks Shown: All - -> Task - Node: host=localhost port=57638 dbname=regression - -> Aggregate - -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290002 on orders_290002 orders - -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem - -> Task - Node: host=localhost port=57637 dbname=regression - -> Aggregate - -> Merge Join - Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) - -> Index Only Scan using orders_pkey_290003 on orders_290003 orders - -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -(18 rows) - --- Now set the minimum value for a shard to null. Then check that we don't apply --- partition or join pruning for the shard with null min value. Since it is not --- supported with single-repartition join, dual-repartition has been used. 
-UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; -EXPLAIN (COSTS FALSE) -SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; -LOG: join order: [ "lineitem" ] - QUERY PLAN -------------------------------------------------------------------------------- - Custom Scan (Citus Real-Time) - Task Count: 2 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem - Index Cond: (l_orderkey = 9030) - -> Task - Node: host=localhost port=57638 dbname=regression - -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem - Index Cond: (l_orderkey = 9030) -(11 rows) - -EXPLAIN (COSTS FALSE) -SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders - WHERE l_partkey = o_custkey; -LOG: join order: [ "lineitem" ][ dual partition join "orders" ] -DEBUG: join prunable for task partitionId 0 and 1 -DEBUG: join prunable for task partitionId 0 and 2 -DEBUG: join prunable for task partitionId 0 and 3 -DEBUG: join prunable for task partitionId 1 and 0 -DEBUG: join prunable for task partitionId 1 and 2 -DEBUG: join prunable for task partitionId 1 and 3 -DEBUG: join prunable for task partitionId 2 and 0 -DEBUG: join prunable for task partitionId 2 and 1 -DEBUG: join prunable for task partitionId 2 and 3 -DEBUG: join prunable for task partitionId 3 and 0 -DEBUG: join prunable for task partitionId 3 and 1 -DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 2 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 5 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 7 -DETAIL: Creating dependency on merge taskId 9 -DEBUG: pruning merge fetch taskId 8 -DETAIL: Creating 
dependency on merge taskId 9 -DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: pruning merge fetch taskId 11 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: cannot use real time executor with repartition jobs -HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker. - QUERY PLAN -------------------------------------------------------------------- - Aggregate - -> Custom Scan (Citus Task-Tracker) - Task Count: 4 - Tasks Shown: None, not supported for re-partition queries - -> MapMergeJob - Map Task Count: 2 - Merge Task Count: 4 - -> MapMergeJob - Map Task Count: 2 - Merge Task Count: 4 -(10 rows) - --- Next, set the maximum value for another shard to null. Then check that we --- don't apply partition or join pruning for this other shard either. Since it --- is not supported with single-repartition join, dual-repartition has been used. -UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; -EXPLAIN (COSTS FALSE) -SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; -LOG: join order: [ "lineitem" ] - QUERY PLAN -------------------------------------------------------------------------------- - Custom Scan (Citus Real-Time) - Task Count: 2 - Tasks Shown: All - -> Task - Node: host=localhost port=57638 dbname=regression - -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem - Index Cond: (l_orderkey = 9030) - -> Task - Node: host=localhost port=57637 dbname=regression - -> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem - Index Cond: (l_orderkey = 9030) -(11 rows) - -EXPLAIN (COSTS FALSE) -SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders - WHERE l_partkey = o_custkey; -LOG: join order: [ "lineitem" ][ dual partition join "orders" ] -DEBUG: join prunable for task partitionId 0 and 1 -DEBUG: join prunable for task partitionId 0 and 2 -DEBUG: join prunable for task partitionId 0 and 3 -DEBUG: 
join prunable for task partitionId 1 and 0 -DEBUG: join prunable for task partitionId 1 and 2 -DEBUG: join prunable for task partitionId 1 and 3 -DEBUG: join prunable for task partitionId 2 and 0 -DEBUG: join prunable for task partitionId 2 and 1 -DEBUG: join prunable for task partitionId 2 and 3 -DEBUG: join prunable for task partitionId 3 and 0 -DEBUG: join prunable for task partitionId 3 and 1 -DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 2 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 5 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 7 -DETAIL: Creating dependency on merge taskId 9 -DEBUG: pruning merge fetch taskId 8 -DETAIL: Creating dependency on merge taskId 9 -DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: pruning merge fetch taskId 11 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: cannot use real time executor with repartition jobs -HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker. - QUERY PLAN -------------------------------------------------------------------- - Aggregate - -> Custom Scan (Citus Task-Tracker) - Task Count: 4 - Tasks Shown: None, not supported for re-partition queries - -> MapMergeJob - Map Task Count: 2 - Merge Task Count: 4 - -> MapMergeJob - Map Task Count: 2 - Merge Task Count: 4 -(10 rows) - --- Last, set the minimum value to 0 and check that we don't treat it as null. We --- should apply partition and join pruning for this shard now. Since it is not --- supported with single-repartition join, dual-repartition has been used. 
-UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; -EXPLAIN (COSTS FALSE) -SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; -LOG: join order: [ "lineitem" ] -DEBUG: Plan is router executable - QUERY PLAN -------------------------------------------------------------------------------- - Custom Scan (Citus Router) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=57637 dbname=regression - -> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem - Index Cond: (l_orderkey = 9030) -(7 rows) - -EXPLAIN (COSTS FALSE) -SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders - WHERE l_partkey = o_custkey; -LOG: join order: [ "lineitem" ][ dual partition join "orders" ] -DEBUG: join prunable for task partitionId 0 and 1 -DEBUG: join prunable for task partitionId 0 and 2 -DEBUG: join prunable for task partitionId 0 and 3 -DEBUG: join prunable for task partitionId 1 and 0 -DEBUG: join prunable for task partitionId 1 and 2 -DEBUG: join prunable for task partitionId 1 and 3 -DEBUG: join prunable for task partitionId 2 and 0 -DEBUG: join prunable for task partitionId 2 and 1 -DEBUG: join prunable for task partitionId 2 and 3 -DEBUG: join prunable for task partitionId 3 and 0 -DEBUG: join prunable for task partitionId 3 and 1 -DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 2 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 5 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 7 -DETAIL: Creating dependency on merge taskId 9 -DEBUG: pruning merge fetch taskId 8 -DETAIL: Creating dependency on merge taskId 9 -DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: pruning merge fetch 
taskId 11 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: cannot use real time executor with repartition jobs -HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker. - QUERY PLAN -------------------------------------------------------------------- - Aggregate - -> Custom Scan (Citus Task-Tracker) - Task Count: 4 - Tasks Shown: None, not supported for re-partition queries - -> MapMergeJob - Map Task Count: 2 - Merge Task Count: 4 - -> MapMergeJob - Map Task Count: 2 - Merge Task Count: 4 -(10 rows) - --- Set minimum and maximum values for two shards back to their original values -UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000; -UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001; -SET client_min_messages TO NOTICE; diff --git a/src/test/regress/expected/multi_partitioning_1.out b/src/test/regress/expected/multi_partitioning_1.out deleted file mode 100644 index 12dacd4ab..000000000 --- a/src/test/regress/expected/multi_partitioning_1.out +++ /dev/null @@ -1,1706 +0,0 @@ --- --- Distributed Partitioned Table Tests --- -SET citus.next_shard_id TO 1660000; -SET citus.shard_count TO 4; -SET citus.shard_replication_factor TO 1; --- print major version number for version-specific tests -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int AS server_version; - server_version ----------------- - 9 -(1 row) - --- --- Distributed Partitioned Table Creation Tests --- --- 1-) Distributing partitioned table --- create partitioned table -CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test(id int, time date) PARTITION ... - ^ -CREATE TABLE partitioning_hash_test(id int, subid int) PARTITION BY HASH(subid); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...E TABLE partitioning_hash_test(id int, subid int) PARTITION ... 
- ^ --- create its partitions -CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2009 PARTITION OF partitionin... - ^ -CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2010 PARTITION OF partitionin... - ^ -CREATE TABLE partitioning_hash_test_0 PARTITION OF partitioning_hash_test FOR VALUES WITH (MODULUS 3, REMAINDER 0); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_hash_test_0 PARTITION OF partition... - ^ -CREATE TABLE partitioning_hash_test_1 PARTITION OF partitioning_hash_test FOR VALUES WITH (MODULUS 3, REMAINDER 1); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_hash_test_1 PARTITION OF partition... - ^ --- load some data and distribute tables -INSERT INTO partitioning_test VALUES (1, '2009-06-06'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES (1, '2009-06-06'); - ^ -INSERT INTO partitioning_test VALUES (2, '2010-07-07'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES (2, '2010-07-07'); - ^ -INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09'); -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09'); - ^ -INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); -ERROR: relation "partitioning_test_2010" does not exist -LINE 1: INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); - ^ -INSERT INTO partitioning_hash_test VALUES (1, 2); -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: INSERT INTO partitioning_hash_test VALUES (1, 2); - ^ -INSERT INTO partitioning_hash_test VALUES (2, 13); -ERROR: 
relation "partitioning_hash_test" does not exist -LINE 1: INSERT INTO partitioning_hash_test VALUES (2, 13); - ^ -INSERT INTO partitioning_hash_test VALUES (3, 7); -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: INSERT INTO partitioning_hash_test VALUES (3, 7); - ^ -INSERT INTO partitioning_hash_test VALUES (4, 4); -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: INSERT INTO partitioning_hash_test VALUES (4, 4); - ^ --- distribute partitioned table -SELECT create_distributed_table('partitioning_test', 'id'); -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test', 'id'); - ^ -SELECT create_distributed_table('partitioning_hash_test', 'id'); -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: SELECT create_distributed_table('partitioning_hash_test', 'i... - ^ --- see the data is loaded to shards -SELECT * FROM partitioning_test ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test ORDER BY 1; - ^ -SELECT * FROM partitioning_hash_test ORDER BY 1; -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: SELECT * FROM partitioning_hash_test ORDER BY 1; - ^ --- see partitioned table and its partitions are distributed -SELECT - logicalrelid -FROM - pg_dist_partition -WHERE - logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') -ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 6: logicalrelid IN ('partitioning_test', 'partitioning_test_20... - ^ -SELECT - logicalrelid, count(*) -FROM pg_dist_shard - WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') -GROUP BY - logicalrelid -ORDER BY - 1,2; -ERROR: relation "partitioning_test" does not exist -LINE 4: WHERE logicalrelid IN ('partitioning_test', 'partitioning_t... 
- ^ -SELECT - logicalrelid -FROM - pg_dist_partition -WHERE - logicalrelid IN ('partitioning_hash_test', 'partitioning_hash_test_0', 'partitioning_hash_test_1') -ORDER BY 1; -ERROR: relation "partitioning_hash_test" does not exist -LINE 6: logicalrelid IN ('partitioning_hash_test', 'partitioning_ha... - ^ -SELECT - logicalrelid, count(*) -FROM pg_dist_shard - WHERE logicalrelid IN ('partitioning_hash_test', 'partitioning_hash_test_0', 'partitioning_hash_test_1') -GROUP BY - logicalrelid -ORDER BY - 1,2; -ERROR: relation "partitioning_hash_test" does not exist -LINE 4: WHERE logicalrelid IN ('partitioning_hash_test', 'partition... - ^ --- 2-) Creating partition of a distributed table -CREATE TABLE partitioning_test_2011 PARTITION OF partitioning_test FOR VALUES FROM ('2011-01-01') TO ('2012-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2011 PARTITION OF partitionin... - ^ --- new partition is automatically distributed as well -SELECT - logicalrelid -FROM - pg_dist_partition -WHERE - logicalrelid IN ('partitioning_test', 'partitioning_test_2011') -ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 6: logicalrelid IN ('partitioning_test', 'partitioning_test_20... - ^ -SELECT - logicalrelid, count(*) -FROM pg_dist_shard - WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') -GROUP BY - logicalrelid -ORDER BY - 1,2; -ERROR: relation "partitioning_test" does not exist -LINE 4: WHERE logicalrelid IN ('partitioning_test', 'partitioning_t... 
- ^ --- 3-) Attaching non distributed table to a distributed table -CREATE TABLE partitioning_test_2012(id int, time date); --- load some data -INSERT INTO partitioning_test_2012 VALUES (5, '2012-06-06'); -INSERT INTO partitioning_test_2012 VALUES (6, '2012-07-07'); -ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2012 FOR VALUES FROM ('2012-01-01') TO ('2013-01-01'); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_... - ^ --- attached partition is distributed as well -SELECT - logicalrelid -FROM - pg_dist_partition -WHERE - logicalrelid IN ('partitioning_test', 'partitioning_test_2012') -ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 6: logicalrelid IN ('partitioning_test', 'partitioning_test_20... - ^ -SELECT - logicalrelid, count(*) -FROM pg_dist_shard - WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') -GROUP BY - logicalrelid -ORDER BY - 1,2; -ERROR: relation "partitioning_test" does not exist -LINE 4: WHERE logicalrelid IN ('partitioning_test', 'partitioning_t... - ^ --- try to insert a new data to hash partitioned table --- no partition is defined for value 5 -INSERT INTO partitioning_hash_test VALUES (8, 5); -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: INSERT INTO partitioning_hash_test VALUES (8, 5); - ^ -INSERT INTO partitioning_hash_test VALUES (9, 12); -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: INSERT INTO partitioning_hash_test VALUES (9, 12); - ^ -CREATE TABLE partitioning_hash_test_2 (id int, subid int); -INSERT INTO partitioning_hash_test_2 VALUES (8, 5); -ALTER TABLE partitioning_hash_test ATTACH PARTITION partitioning_hash_test_2 FOR VALUES WITH (MODULUS 3, REMAINDER 2); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE partitioning_hash_test ATTACH PARTITION partitio... 
- ^ -INSERT INTO partitioning_hash_test VALUES (9, 12); -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: INSERT INTO partitioning_hash_test VALUES (9, 12); - ^ --- see the data is loaded to shards -SELECT * FROM partitioning_test ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test ORDER BY 1; - ^ -SELECT * FROM partitioning_hash_test ORDER BY 1; -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: SELECT * FROM partitioning_hash_test ORDER BY 1; - ^ --- 4-) Attaching distributed table to distributed table -CREATE TABLE partitioning_test_2013(id int, time date); -SELECT create_distributed_table('partitioning_test_2013', 'id'); - create_distributed_table --------------------------- - -(1 row) - --- load some data -INSERT INTO partitioning_test_2013 VALUES (7, '2013-06-06'); -INSERT INTO partitioning_test_2013 VALUES (8, '2013-07-07'); -ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES FROM ('2013-01-01') TO ('2014-01-01'); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_... - ^ --- see the data is loaded to shards -SELECT * FROM partitioning_test ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test ORDER BY 1; - ^ --- 5-) Failure cases while creating distributed partitioned tables --- cannot distribute a partition if its parent is not distributed -CREATE TABLE partitioning_test_failure(id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...ABLE partitioning_test_failure(id int, time date) PARTITION ... - ^ -CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_failure_2009 PARTITION OF par... 
- ^ -SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); -ERROR: relation "partitioning_test_failure_2009" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test_failure_2... - ^ --- only hash distributed tables can have partitions -SELECT create_distributed_table('partitioning_test_failure', 'id', 'append'); -ERROR: relation "partitioning_test_failure" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test_failure',... - ^ -SELECT create_distributed_table('partitioning_test_failure', 'id', 'range'); -ERROR: relation "partitioning_test_failure" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test_failure',... - ^ -SELECT create_reference_table('partitioning_test_failure'); -ERROR: relation "partitioning_test_failure" does not exist -LINE 1: SELECT create_reference_table('partitioning_test_failure'); - ^ -SET citus.shard_replication_factor TO 1; --- non-distributed tables cannot have distributed partitions; -DROP TABLE partitioning_test_failure_2009; -ERROR: table "partitioning_test_failure_2009" does not exist -CREATE TABLE partitioning_test_failure_2009(id int, time date); -SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); - create_distributed_table --------------------------- - -(1 row) - -ALTER TABLE partitioning_test_failure ATTACH PARTITION partitioning_test_failure_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE partitioning_test_failure ATTACH PARTITION parti... - ^ --- multi-level partitioning is not allowed -DROP TABLE partitioning_test_failure_2009; -CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_failure_2009 PARTITION OF par... 
- ^ -SELECT create_distributed_table('partitioning_test_failure', 'id'); -ERROR: relation "partitioning_test_failure" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test_failure',... - ^ --- multi-level partitioning is not allowed in different order -DROP TABLE partitioning_test_failure_2009; -ERROR: table "partitioning_test_failure_2009" does not exist -SELECT create_distributed_table('partitioning_test_failure', 'id'); -ERROR: relation "partitioning_test_failure" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test_failure',... - ^ -CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_failure_2009 PARTITION OF par... - ^ --- --- DMLs in distributed partitioned tables --- --- test COPY --- COPY data to partitioned table -COPY partitioning_test FROM STDIN WITH CSV; -ERROR: relation "partitioning_test" does not exist -9,2009-01-01 -10,2010-01-01 -11,2011-01-01 -12,2012-01-01 -\. -invalid command \. --- COPY data to partition directly -COPY partitioning_test_2009 FROM STDIN WITH CSV; -ERROR: syntax error at or near "9" -LINE 1: 9,2009-01-01 - ^ -13,2009-01-02 -14,2009-01-03 -\. -invalid command \. 
--- see the data is loaded to shards -SELECT * FROM partitioning_test WHERE id >= 9 ORDER BY 1; -ERROR: syntax error at or near "13" -LINE 1: 13,2009-01-02 - ^ --- test INSERT --- INSERT INTO the partitioned table -INSERT INTO partitioning_test VALUES(15, '2009-02-01'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES(15, '2009-02-01'); - ^ -INSERT INTO partitioning_test VALUES(16, '2010-02-01'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES(16, '2010-02-01'); - ^ -INSERT INTO partitioning_test VALUES(17, '2011-02-01'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES(17, '2011-02-01'); - ^ -INSERT INTO partitioning_test VALUES(18, '2012-02-01'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES(18, '2012-02-01'); - ^ --- INSERT INTO the partitions directly table -INSERT INTO partitioning_test VALUES(19, '2009-02-02'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES(19, '2009-02-02'); - ^ -INSERT INTO partitioning_test VALUES(20, '2010-02-02'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES(20, '2010-02-02'); - ^ --- see the data is loaded to shards -SELECT * FROM partitioning_test WHERE id >= 15 ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id >= 15 ORDER BY 1; - ^ --- test INSERT/SELECT --- INSERT/SELECT from partition to partitioned table -INSERT INTO partitioning_test SELECT * FROM partitioning_test_2011; -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test SELECT * FROM partitioning_tes... 
- ^ --- INSERT/SELECT from partitioned table to partition -INSERT INTO partitioning_test_2012 SELECT * FROM partitioning_test WHERE time >= '2012-01-01' AND time < '2013-01-01'; -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test_2012 SELECT * FROM partitionin... - ^ --- see the data is loaded to shards (rows in the given range should be duplicated) -SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2013-01-01' ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE time >= '2011-01-01' A... - ^ --- test UPDATE --- UPDATE partitioned table -UPDATE partitioning_test SET time = '2013-07-07' WHERE id = 7; -ERROR: relation "partitioning_test" does not exist -LINE 1: UPDATE partitioning_test SET time = '2013-07-07' WHERE id = ... - ^ --- UPDATE partition directly -UPDATE partitioning_test_2013 SET time = '2013-08-08' WHERE id = 8; --- see the data is updated -SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER... - ^ --- UPDATE that tries to move a row to a non-existing partition (this should fail) -UPDATE partitioning_test SET time = '2020-07-07' WHERE id = 7; -ERROR: relation "partitioning_test" does not exist -LINE 1: UPDATE partitioning_test SET time = '2020-07-07' WHERE id = ... 
- ^ --- UPDATE with subqueries on partitioned table -UPDATE - partitioning_test -SET - time = time + INTERVAL '1 day' -WHERE - id IN (SELECT id FROM partitioning_test WHERE id = 1); -ERROR: relation "partitioning_test" does not exist -LINE 2: partitioning_test - ^ --- UPDATE with subqueries on partition -UPDATE - partitioning_test_2009 -SET - time = time + INTERVAL '1 month' -WHERE - id IN (SELECT id FROM partitioning_test WHERE id = 2); -ERROR: relation "partitioning_test_2009" does not exist -LINE 2: partitioning_test_2009 - ^ --- see the data is updated -SELECT * FROM partitioning_test WHERE id = 1 OR id = 2 ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id = 1 OR id = 2 ORDER... - ^ --- test DELETE --- DELETE from partitioned table -DELETE FROM partitioning_test WHERE id = 9; -ERROR: relation "partitioning_test" does not exist -LINE 1: DELETE FROM partitioning_test WHERE id = 9; - ^ --- DELETE from partition directly -DELETE FROM partitioning_test_2010 WHERE id = 10; -ERROR: relation "partitioning_test_2010" does not exist -LINE 1: DELETE FROM partitioning_test_2010 WHERE id = 10; - ^ --- see the data is deleted -SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDE... - ^ --- create default partition -CREATE TABLE partitioning_test_default PARTITION OF partitioning_test DEFAULT; -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_default PARTITION OF partitio... 
- ^ -\d+ partitioning_test -INSERT INTO partitioning_test VALUES(21, '2014-02-02'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES(21, '2014-02-02'); - ^ -INSERT INTO partitioning_test VALUES(22, '2015-04-02'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES(22, '2015-04-02'); - ^ --- see they are inserted into default partition -SELECT * FROM partitioning_test WHERE id > 20; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id > 20; - ^ -SELECT * FROM partitioning_test_default; -ERROR: relation "partitioning_test_default" does not exist -LINE 1: SELECT * FROM partitioning_test_default; - ^ --- create a new partition (will fail) -CREATE TABLE partitioning_test_2014 PARTITION OF partitioning_test FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2014 PARTITION OF partitionin... - ^ -BEGIN; -ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_default; -ERROR: syntax error at or near "DETACH" -LINE 1: ALTER TABLE partitioning_test DETACH PARTITION partitioning_... - ^ -CREATE TABLE partitioning_test_2014 PARTITION OF partitioning_test FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2014 PARTITION OF partitionin... 
- ^ -INSERT INTO partitioning_test SELECT * FROM partitioning_test_default WHERE time >= '2014-01-01' AND time < '2015-01-01'; -ERROR: current transaction is aborted, commands ignored until end of transaction block -DELETE FROM partitioning_test_default WHERE time >= '2014-01-01' AND time < '2015-01-01'; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_default DEFAULT; -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_... - ^ -END; --- see data is in the table, but some moved out from default partition -SELECT * FROM partitioning_test WHERE id > 20; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id > 20; - ^ -SELECT * FROM partitioning_test_default; -ERROR: relation "partitioning_test_default" does not exist -LINE 1: SELECT * FROM partitioning_test_default; - ^ --- test master_modify_multiple_shards --- master_modify_multiple_shards on partitioned table -SELECT master_modify_multiple_shards('UPDATE partitioning_test SET time = time + INTERVAL ''1 day'''); -ERROR: relation "partitioning_test" does not exist --- see rows are UPDATED -SELECT * FROM partitioning_test ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test ORDER BY 1; - ^ --- master_modify_multiple_shards on partition directly -SELECT master_modify_multiple_shards('UPDATE partitioning_test_2009 SET time = time + INTERVAL ''1 day'''); -ERROR: relation "partitioning_test_2009" does not exist --- see rows are UPDATED -SELECT * FROM partitioning_test_2009 ORDER BY 1; -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: SELECT * FROM partitioning_test_2009 ORDER BY 1; - ^ --- test master_modify_multiple_shards which fails in workers (updated value is outside of partition bounds) -SELECT master_modify_multiple_shards('UPDATE 
partitioning_test_2009 SET time = time + INTERVAL ''6 month'''); -ERROR: relation "partitioning_test_2009" does not exist --- --- DDL in distributed partitioned tables --- --- test CREATE INDEX --- CREATE INDEX on partitioned table - this will error out --- on earlier versions of postgres earlier than 11. -CREATE INDEX partitioning_index ON partitioning_test(id); -ERROR: relation "partitioning_test" does not exist --- CREATE INDEX on partition -CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id); -ERROR: relation "partitioning_test_2009" does not exist --- CREATE INDEX CONCURRENTLY on partition -CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id); -ERROR: relation "partitioning_test_2010" does not exist --- see index is created -SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test%' ORDER BY indexname; - tablename | indexname ------------+----------- -(0 rows) - --- test drop --- indexes created on parent table can only be dropped on parent table --- ie using the same index name --- following will fail -DROP INDEX partitioning_test_2009_id_idx; -ERROR: index "partitioning_test_2009_id_idx" does not exist --- but dropping index on parent table will succeed -DROP INDEX partitioning_index; -ERROR: index "partitioning_index" does not exist --- this index was already created on partition table -DROP INDEX partitioning_2009_index; -ERROR: index "partitioning_2009_index" does not exist --- test drop index on non-distributed, partitioned table -CREATE TABLE non_distributed_partitioned_table(a int, b int) PARTITION BY RANGE (a); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...E non_distributed_partitioned_table(a int, b int) PARTITION ... - ^ -CREATE TABLE non_distributed_partitioned_table_1 PARTITION OF non_distributed_partitioned_table -FOR VALUES FROM (0) TO (10); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE non_distributed_partitioned_table_1 PARTITION O... 
- ^ -CREATE INDEX non_distributed_partitioned_table_index ON non_distributed_partitioned_table(a); -ERROR: relation "non_distributed_partitioned_table" does not exist --- see index is created -SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed%' ORDER BY indexname; - tablename | indexname ------------+----------- -(0 rows) - --- drop the index and see it is dropped -DROP INDEX non_distributed_partitioned_table_index; -ERROR: index "non_distributed_partitioned_table_index" does not exist -SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed%' ORDER BY indexname; - tablename | indexname ------------+----------- -(0 rows) - --- test add COLUMN --- add COLUMN to partitioned table -ALTER TABLE partitioning_test ADD new_column int; -ERROR: relation "partitioning_test" does not exist --- add COLUMN to partition - this will error out -ALTER TABLE partitioning_test_2010 ADD new_column_2 int; -ERROR: relation "partitioning_test_2010" does not exist --- see additional column is created -SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT name, type FROM table_attrs WHERE relid = 'partitioni... - ^ -SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test_2010'::regclass ORDER BY 1; -ERROR: relation "partitioning_test_2010" does not exist -LINE 1: SELECT name, type FROM table_attrs WHERE relid = 'partitioni... 
- ^ --- test add PRIMARY KEY --- add PRIMARY KEY to partitioned table - this will error out -ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_primary PRIMARY KEY (id); -ERROR: relation "partitioning_test" does not exist --- ADD PRIMARY KEY to partition -ALTER TABLE partitioning_test_2009 ADD CONSTRAINT partitioning_2009_primary PRIMARY KEY (id); -ERROR: relation "partitioning_test_2009" does not exist --- see PRIMARY KEY is created -SELECT - table_name, - constraint_name, - constraint_type -FROM - information_schema.table_constraints -WHERE - table_name = 'partitioning_test_2009' AND - constraint_name = 'partitioning_2009_primary'; - table_name | constraint_name | constraint_type -------------+-----------------+----------------- -(0 rows) - --- however, you can add primary key if it contains both distribution and partition key -ALTER TABLE partitioning_hash_test ADD CONSTRAINT partitioning_hash_primary PRIMARY KEY (id, subid); -ERROR: relation "partitioning_hash_test" does not exist --- see PRIMARY KEY is created -SELECT - table_name, - constraint_name, - constraint_type -FROM - information_schema.table_constraints -WHERE - table_name LIKE 'partitioning_hash_test%' AND - constraint_type = 'PRIMARY KEY' -ORDER BY 1; - table_name | constraint_name | constraint_type -------------+-----------------+----------------- -(0 rows) - --- test ADD FOREIGN CONSTRAINT --- add FOREIGN CONSTRAINT to partitioned table -- this will error out (it is a self reference) -ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id); -ERROR: relation "partitioning_test" does not exist --- add FOREIGN CONSTRAINT to partition -INSERT INTO partitioning_test_2009 VALUES (5, '2009-06-06'); -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: INSERT INTO partitioning_test_2009 VALUES (5, '2009-06-06'); - ^ -INSERT INTO partitioning_test_2009 VALUES (6, '2009-07-07'); -ERROR: relation "partitioning_test_2009" 
does not exist -LINE 1: INSERT INTO partitioning_test_2009 VALUES (6, '2009-07-07'); - ^ -INSERT INTO partitioning_test_2009 VALUES(12, '2009-02-01'); -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: INSERT INTO partitioning_test_2009 VALUES(12, '2009-02-01'); - ^ -INSERT INTO partitioning_test_2009 VALUES(18, '2009-02-01'); -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: INSERT INTO partitioning_test_2009 VALUES(18, '2009-02-01'); - ^ -ALTER TABLE partitioning_test_2012 ADD CONSTRAINT partitioning_2012_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id) ON DELETE CASCADE; -ERROR: relation "partitioning_test_2009" does not exist --- see FOREIGN KEY is created -SELECT "Constraint" FROM table_fkeys WHERE relid = 'partitioning_test_2012'::regclass ORDER BY 1; - Constraint ------------- -(0 rows) - --- test ON DELETE CASCADE works -DELETE FROM partitioning_test_2009 WHERE id = 5; -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: DELETE FROM partitioning_test_2009 WHERE id = 5; - ^ --- see that element is deleted from both partitions -SELECT * FROM partitioning_test_2009 WHERE id = 5 ORDER BY 1; -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: SELECT * FROM partitioning_test_2009 WHERE id = 5 ORDER BY 1... - ^ -SELECT * FROM partitioning_test_2012 WHERE id = 5 ORDER BY 1; - id | time -----+------------ - 5 | 06-06-2012 -(1 row) - --- test DETACH partition -ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009; -ERROR: syntax error at or near "DETACH" -LINE 1: ALTER TABLE partitioning_test DETACH PARTITION partitioning_... - ^ --- see DETACHed partitions content is not accessible from partitioning_test; -SELECT * FROM partitioning_test WHERE time >= '2009-01-01' AND time < '2010-01-01' ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE time >= '2009-01-01' A... 
- ^ --- delete from default partition -DELETE FROM partitioning_test WHERE time >= '2015-01-01'; -ERROR: relation "partitioning_test" does not exist -LINE 1: DELETE FROM partitioning_test WHERE time >= '2015-01-01'; - ^ -SELECT * FROM partitioning_test_default; -ERROR: relation "partitioning_test_default" does not exist -LINE 1: SELECT * FROM partitioning_test_default; - ^ --- create a reference table for foreign key test -CREATE TABLE partitioning_test_reference(id int PRIMARY KEY, subid int); -INSERT INTO partitioning_test_reference SELECT a, a FROM generate_series(1, 50) a; -SELECT create_reference_table('partitioning_test_reference'); -NOTICE: Copying data from local table... - create_reference_table ------------------------- - -(1 row) - -ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_reference_fkey FOREIGN KEY (id) REFERENCES partitioning_test_reference(id) ON DELETE CASCADE; -ERROR: relation "partitioning_test" does not exist -CREATE TABLE partitioning_test_foreign_key(id int PRIMARY KEY, value int); -SELECT create_distributed_table('partitioning_test_foreign_key', 'id'); - create_distributed_table --------------------------- - -(1 row) - -INSERT INTO partitioning_test_foreign_key SELECT * FROM partitioning_test_reference; -ALTER TABLE partitioning_hash_test ADD CONSTRAINT partitioning_reference_fk_test FOREIGN KEY (id) REFERENCES partitioning_test_foreign_key(id) ON DELETE CASCADE; -ERROR: relation "partitioning_hash_test" does not exist --- check foreign keys on partitions -SELECT - table_name, constraint_name, constraint_type FROm information_schema.table_constraints -WHERE - table_name LIKE 'partitioning_hash_test%' AND - constraint_type = 'FOREIGN KEY' -ORDER BY - 1,2; - table_name | constraint_name | constraint_type -------------+-----------------+----------------- -(0 rows) - --- check foreign keys on partition shards --- there is some text ordering issue regarding table name --- forcing integer sort by extracting shardid -CREATE TYPE 
foreign_key_details AS (table_name text, constraint_name text, constraint_type text); -SELECT right(table_name, 7)::int as shardid, * FROM ( - SELECT (json_populate_record(NULL::foreign_key_details, - json_array_elements_text(result::json)::json )).* - FROM run_command_on_workers($$ - SELECT - COALESCE(json_agg(row_to_json(q)), '[]'::json) - FROM ( - SELECT - table_name, constraint_name, constraint_type - FROM information_schema.table_constraints - WHERE - table_name LIKE 'partitioning_hash_test%' AND - constraint_type = 'FOREIGN KEY' - ORDER BY 1, 2, 3 - ) q - $$) ) w -ORDER BY 1, 2, 3, 4; - shardid | table_name | constraint_name | constraint_type ----------+------------+-----------------+----------------- -(0 rows) - -DROP TYPE foreign_key_details; --- set replication factor back to 1 since it gots reset --- after connection re-establishment -SET citus.shard_replication_factor TO 1; -SELECT * FROM partitioning_test WHERE id = 11 or id = 12; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id = 11 or id = 12; - ^ -DELETE FROM partitioning_test_reference WHERE id = 11 or id = 12; -SELECT * FROM partitioning_hash_test ORDER BY 1, 2; -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: SELECT * FROM partitioning_hash_test ORDER BY 1, 2; - ^ -DELETE FROM partitioning_test_foreign_key WHERE id = 2 OR id = 9; --- see data is deleted from referencing table -SELECT * FROM partitioning_test WHERE id = 11 or id = 12; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id = 11 or id = 12; - ^ -SELECT * FROM partitioning_hash_test ORDER BY 1, 2; -ERROR: relation "partitioning_hash_test" does not exist -LINE 1: SELECT * FROM partitioning_hash_test ORDER BY 1, 2; - ^ --- --- Transaction tests --- --- DDL in transaction -BEGIN; -ALTER TABLE partitioning_test ADD newer_column int; -ERROR: relation "partitioning_test" does not exist --- see additional column is created 
-SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- see rollback is successful -SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT name, type FROM table_attrs WHERE relid = 'partitioni... - ^ --- COPY in transaction -BEGIN; -COPY partitioning_test FROM STDIN WITH CSV; -ERROR: relation "partitioning_test" does not exist -22,2010-01-01,22 -23,2011-01-01,23 -24,2013-01-01,24 -\. -invalid command \. --- see the data is loaded to shards -SELECT * FROM partitioning_test WHERE id = 22 ORDER BY 1; -ERROR: syntax error at or near "22" -LINE 1: 22,2010-01-01,22 - ^ -SELECT * FROM partitioning_test WHERE id = 23 ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -SELECT * FROM partitioning_test WHERE id = 24 ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- see rollback is successful -SELECT * FROM partitioning_test WHERE id >= 22 ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id >= 22 ORDER BY 1; - ^ --- DML in transaction -BEGIN; --- INSERT in transaction -INSERT INTO partitioning_test VALUES(25, '2010-02-02'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES(25, '2010-02-02'); - ^ --- see the data is loaded to shards -SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block --- INSERT/SELECT in transaction -INSERT INTO partitioning_test SELECT * FROM partitioning_test WHERE id = 25; -ERROR: current transaction is aborted, commands ignored until end of transaction block --- see the data is loaded to shards -SELECT * 
FROM partitioning_test WHERE id = 25 ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block --- UPDATE in transaction -UPDATE partitioning_test SET time = '2010-10-10' WHERE id = 25; -ERROR: current transaction is aborted, commands ignored until end of transaction block --- see the data is updated -SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block --- perform operations on partition and partioned tables together -INSERT INTO partitioning_test VALUES(26, '2010-02-02', 26); -ERROR: current transaction is aborted, commands ignored until end of transaction block -INSERT INTO partitioning_test_2010 VALUES(26, '2010-02-02', 26); -ERROR: current transaction is aborted, commands ignored until end of transaction block -COPY partitioning_test FROM STDIN WITH CSV; -ERROR: current transaction is aborted, commands ignored until end of transaction block -26,2010-02-02,26 -\. -invalid command \. -COPY partitioning_test_2010 FROM STDIN WITH CSV; -ERROR: syntax error at or near "26" -LINE 1: 26,2010-02-02,26 - ^ -26,2010-02-02,26 -\. -invalid command \. --- see the data is loaded to shards (we should see 4 rows with same content) -SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; -ERROR: syntax error at or near "26" -LINE 1: 26,2010-02-02,26 - ^ -ROLLBACK; --- see rollback is successful -SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; - ^ --- DETACH and DROP in a transaction -BEGIN; -ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2011; -ERROR: syntax error at or near "DETACH" -LINE 1: ALTER TABLE partitioning_test DETACH PARTITION partitioning_... 
- ^ -DROP TABLE partitioning_test_2011; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- see DROPed partitions content is not accessible -SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2012-01-01' ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE time >= '2011-01-01' A... - ^ --- --- Misc tests --- --- test TRUNCATE --- test TRUNCATE partition -TRUNCATE partitioning_test_2012; --- see partition is TRUNCATEd -SELECT * FROM partitioning_test_2012 ORDER BY 1; - id | time -----+------ -(0 rows) - --- test TRUNCATE partitioned table -TRUNCATE partitioning_test; -ERROR: relation "partitioning_test" does not exist --- see partitioned table is TRUNCATEd -SELECT * FROM partitioning_test ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test ORDER BY 1; - ^ --- test DROP --- test DROP partition -INSERT INTO partitioning_test_2010 VALUES(27, '2010-02-01'); -ERROR: relation "partitioning_test_2010" does not exist -LINE 1: INSERT INTO partitioning_test_2010 VALUES(27, '2010-02-01'); - ^ -DROP TABLE partitioning_test_2010; -ERROR: table "partitioning_test_2010" does not exist --- see DROPped partitions content is not accessible from partitioning_test; -SELECT * FROM partitioning_test WHERE time >= '2010-01-01' AND time < '2011-01-01' ORDER BY 1; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT * FROM partitioning_test WHERE time >= '2010-01-01' A... 
- ^ --- test DROP partitioned table -DROP TABLE partitioning_test; -ERROR: table "partitioning_test" does not exist -DROP TABLE partitioning_test_reference; --- dropping the parent should CASCADE to the children as well -SELECT table_name FROM information_schema.tables WHERE table_name LIKE 'partitioning_test%' ORDER BY 1; - table_name -------------------------------- - partitioning_test_2012 - partitioning_test_2013 - partitioning_test_foreign_key -(3 rows) - --- test distributing partitioned table colocated with non-partitioned table -CREATE TABLE partitioned_users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ... int, value_2 int, value_3 float, value_4 bigint) PARTITION ... - ^ -CREATE TABLE partitioned_events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ... int, value_2 int, value_3 float, value_4 bigint) PARTITION ... - ^ -SELECT create_distributed_table('partitioned_users_table', 'user_id', colocate_with => 'users_table'); -ERROR: relation "partitioned_users_table" does not exist -LINE 1: SELECT create_distributed_table('partitioned_users_table', '... - ^ -SELECT create_distributed_table('partitioned_events_table', 'user_id', colocate_with => 'events_table'); -ERROR: relation "partitioned_events_table" does not exist -LINE 1: SELECT create_distributed_table('partitioned_events_table', ... - ^ --- INSERT/SELECT from regular table to partitioned table -CREATE TABLE partitioned_users_table_2009 PARTITION OF partitioned_users_table FOR VALUES FROM ('2017-01-01') TO ('2018-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioned_users_table_2009 PARTITION OF parti... 
- ^ -CREATE TABLE partitioned_events_table_2009 PARTITION OF partitioned_events_table FOR VALUES FROM ('2017-01-01') TO ('2018-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioned_events_table_2009 PARTITION OF part... - ^ -INSERT INTO partitioned_events_table SELECT * FROM events_table; -ERROR: relation "partitioned_events_table" does not exist -LINE 1: INSERT INTO partitioned_events_table SELECT * FROM events_ta... - ^ -INSERT INTO partitioned_users_table_2009 SELECT * FROM users_table; -ERROR: relation "partitioned_users_table_2009" does not exist -LINE 1: INSERT INTO partitioned_users_table_2009 SELECT * FROM users... - ^ --- --- Complex JOINs, subqueries, UNIONs etc... --- --- subquery with UNIONs on partitioned table -SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType -FROM - (SELECT *, random() - FROM - (SELECT - "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" - FROM - (SELECT - "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events - FROM( - (SELECT - "events"."user_id", "events"."time", 0 AS event - FROM - partitioned_events_table as "events" - WHERE - event_type IN (1, 2) ) - UNION - (SELECT - "events"."user_id", "events"."time", 1 AS event - FROM - partitioned_events_table as "events" - WHERE - event_type IN (3, 4) ) - UNION - (SELECT - "events"."user_id", "events"."time", 2 AS event - FROM - partitioned_events_table as "events" - WHERE - event_type IN (5, 6) ) - UNION - (SELECT - "events"."user_id", "events"."time", 3 AS event - FROM - partitioned_events_table as "events" - WHERE - event_type IN (1, 6))) t1 - GROUP BY "t1"."user_id") AS t) "q" -) AS final_query -GROUP BY types -ORDER BY types; -ERROR: relation "partitioned_events_table" does not exist -LINE 14: partitioned_events_table as "events" - ^ --- UNION and JOIN on both partitioned and regular tables -SELECT ("final_query"."event_types") as types, 
count(*) AS sumOfEventType -FROM - (SELECT - *, random() - FROM - (SELECT - "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" - FROM - (SELECT - "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events - FROM ( - (SELECT - * - FROM - (SELECT - "events"."time", 0 AS event, "events"."user_id" - FROM - partitioned_events_table as "events" - WHERE - event_type IN (1, 2)) events_subquery_1) - UNION - (SELECT * - FROM - ( - SELECT * FROM - ( - SELECT - max("events"."time"), - 0 AS event, - "events"."user_id" - FROM - events_table as "events", users_table as "users" - WHERE - events.user_id = users.user_id AND - event_type IN (1, 2) - GROUP BY "events"."user_id" - ) as events_subquery_5 - ) events_subquery_2) - UNION - (SELECT * - FROM - (SELECT - "events"."time", 2 AS event, "events"."user_id" - FROM - partitioned_events_table as "events" - WHERE - event_type IN (3, 4)) events_subquery_3) - UNION - (SELECT * - FROM - (SELECT - "events"."time", 3 AS event, "events"."user_id" - FROM - events_table as "events" - WHERE - event_type IN (5, 6)) events_subquery_4) - ) t1 - GROUP BY "t1"."user_id") AS t) "q" -INNER JOIN - (SELECT - "users"."user_id" - FROM - partitioned_users_table as "users" - WHERE - value_1 > 2 and value_1 < 5) AS t - ON (t.user_id = q.user_id)) as final_query -GROUP BY - types -ORDER BY - types; -ERROR: relation "partitioned_events_table" does not exist -LINE 18: partitioned_events_table as "events" - ^ --- test LIST partitioning -CREATE TABLE list_partitioned_events_table (user_id int, time date, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY LIST (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ... int, value_2 int, value_3 float, value_4 bigint) PARTITION ... 
- ^ -CREATE TABLE list_partitioned_events_table_2014_01_01_05 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2017-11-21', '2017-11-22', '2017-11-23', '2017-11-24', '2017-11-25'); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...TABLE list_partitioned_events_table_2014_01_01_05 PARTITION ... - ^ -CREATE TABLE list_partitioned_events_table_2014_01_06_10 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2017-11-26', '2017-11-27', '2017-11-28', '2017-11-29', '2017-11-30'); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...TABLE list_partitioned_events_table_2014_01_06_10 PARTITION ... - ^ -CREATE TABLE list_partitioned_events_table_2014_01_11_15 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2017-12-01', '2017-12-02', '2017-12-03', '2017-12-04', '2017-12-05'); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...TABLE list_partitioned_events_table_2014_01_11_15 PARTITION ... - ^ --- test distributing partitioned table colocated with another partitioned table -SELECT create_distributed_table('list_partitioned_events_table', 'user_id', colocate_with => 'partitioned_events_table'); -ERROR: relation "list_partitioned_events_table" does not exist -LINE 1: SELECT create_distributed_table('list_partitioned_events_tab... 
- ^ --- INSERT/SELECT from partitioned table to partitioned table -INSERT INTO - list_partitioned_events_table -SELECT - user_id, - date_trunc('day', time) as time, - event_type, - value_2, - value_3, - value_4 -FROM - events_table -WHERE - time >= '2017-11-21' AND - time <= '2017-12-01'; -ERROR: relation "list_partitioned_events_table" does not exist -LINE 2: list_partitioned_events_table - ^ --- LEFT JOINs used with INNER JOINs on range partitioned table, list partitioned table and non-partitioned table -SELECT -count(*) AS cnt, "generated_group_field" - FROM - (SELECT - "eventQuery"."user_id", random(), generated_group_field - FROM - (SELECT - "multi_group_wrapper_1".*, generated_group_field, random() - FROM - (SELECT * - FROM - (SELECT - "list_partitioned_events_table"."time", "list_partitioned_events_table"."user_id" as event_user_id - FROM - list_partitioned_events_table as "list_partitioned_events_table" - WHERE - user_id > 2) "temp_data_queries" - INNER JOIN - (SELECT - "users"."user_id" - FROM - partitioned_users_table as "users" - WHERE - user_id > 2 and value_2 = 1) "user_filters_1" - ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" - LEFT JOIN - (SELECT - "users"."user_id" AS "user_id", value_2 AS "generated_group_field" - FROM - partitioned_users_table as "users") "left_group_by_1" - ON ("left_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" - GROUP BY - "generated_group_field" - ORDER BY - cnt DESC, generated_group_field ASC - LIMIT 10; -ERROR: relation "list_partitioned_events_table" does not exist -LINE 15: list_partitioned_events_table as "list_partitio... - ^ --- --- Additional partitioning features --- --- test multi column partitioning -CREATE TABLE multi_column_partitioning(c1 int, c2 int) PARTITION BY RANGE (c1, c2); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...E TABLE multi_column_partitioning(c1 int, c2 int) PARTITION ... 
- ^ -CREATE TABLE multi_column_partitioning_0_0_10_0 PARTITION OF multi_column_partitioning FOR VALUES FROM (0, 0) TO (10, 0); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE multi_column_partitioning_0_0_10_0 PARTITION OF... - ^ -SELECT create_distributed_table('multi_column_partitioning', 'c1'); -ERROR: relation "multi_column_partitioning" does not exist -LINE 1: SELECT create_distributed_table('multi_column_partitioning',... - ^ --- test INSERT to multi-column partitioned table -INSERT INTO multi_column_partitioning VALUES(1, 1); -ERROR: relation "multi_column_partitioning" does not exist -LINE 1: INSERT INTO multi_column_partitioning VALUES(1, 1); - ^ -INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5); -ERROR: relation "multi_column_partitioning_0_0_10_0" does not exist -LINE 1: INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5)... - ^ --- test INSERT to multi-column partitioned table where no suitable partition exists -INSERT INTO multi_column_partitioning VALUES(10, 1); -ERROR: relation "multi_column_partitioning" does not exist -LINE 1: INSERT INTO multi_column_partitioning VALUES(10, 1); - ^ --- test with MINVALUE/MAXVALUE -CREATE TABLE multi_column_partitioning_10_max_20_min PARTITION OF multi_column_partitioning FOR VALUES FROM (10, MAXVALUE) TO (20, MINVALUE); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...ATE TABLE multi_column_partitioning_10_max_20_min PARTITION ... - ^ --- test INSERT to partition with MINVALUE/MAXVALUE bounds -INSERT INTO multi_column_partitioning VALUES(11, -11); -ERROR: relation "multi_column_partitioning" does not exist -LINE 1: INSERT INTO multi_column_partitioning VALUES(11, -11); - ^ -INSERT INTO multi_column_partitioning_10_max_20_min VALUES(19, -19); -ERROR: relation "multi_column_partitioning_10_max_20_min" does not exist -LINE 1: INSERT INTO multi_column_partitioning_10_max_20_min VALUES(1... 
- ^ --- test INSERT to multi-column partitioned table where no suitable partition exists -INSERT INTO multi_column_partitioning VALUES(20, -20); -ERROR: relation "multi_column_partitioning" does not exist -LINE 1: INSERT INTO multi_column_partitioning VALUES(20, -20); - ^ --- see data is loaded to multi-column partitioned table -SELECT * FROM multi_column_partitioning ORDER BY 1, 2; -ERROR: relation "multi_column_partitioning" does not exist -LINE 1: SELECT * FROM multi_column_partitioning ORDER BY 1, 2; - ^ --- --- Tests for locks on partitioned tables --- -CREATE TABLE partitioning_locks(id int, ref_id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...partitioning_locks(id int, ref_id int, time date) PARTITION ... - ^ --- create its partitions -CREATE TABLE partitioning_locks_2009 PARTITION OF partitioning_locks FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_locks_2009 PARTITION OF partitioni... - ^ -CREATE TABLE partitioning_locks_2010 PARTITION OF partitioning_locks FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_locks_2010 PARTITION OF partitioni... 
- ^ --- distribute partitioned table -SELECT create_distributed_table('partitioning_locks', 'id'); -ERROR: relation "partitioning_locks" does not exist -LINE 1: SELECT create_distributed_table('partitioning_locks', 'id'); - ^ --- test locks on router SELECT -BEGIN; -SELECT * FROM partitioning_locks WHERE id = 1 ORDER BY 1, 2; -ERROR: relation "partitioning_locks" does not exist -LINE 1: SELECT * FROM partitioning_locks WHERE id = 1 ORDER BY 1, 2; - ^ -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test locks on real-time SELECT -BEGIN; -SELECT * FROM partitioning_locks ORDER BY 1, 2; -ERROR: relation "partitioning_locks" does not exist -LINE 1: SELECT * FROM partitioning_locks ORDER BY 1, 2; - ^ -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test locks on task-tracker SELECT -SET citus.task_executor_type TO 'task-tracker'; -BEGIN; -SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_locks AS pl2 ON pl1.id = pl2.ref_id ORDER BY 1, 2; -ERROR: relation "partitioning_locks" does not exist -LINE 1: SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_lo... 
- ^ -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; -SET citus.task_executor_type TO 'real-time'; --- test locks on INSERT -BEGIN; -INSERT INTO partitioning_locks VALUES(1, 1, '2009-01-01'); -ERROR: relation "partitioning_locks" does not exist -LINE 1: INSERT INTO partitioning_locks VALUES(1, 1, '2009-01-01'); - ^ -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test locks on UPDATE -BEGIN; -UPDATE partitioning_locks SET time = '2009-02-01' WHERE id = 1; -ERROR: relation "partitioning_locks" does not exist -LINE 1: UPDATE partitioning_locks SET time = '2009-02-01' WHERE id =... 
- ^ -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test locks on DELETE -BEGIN; -DELETE FROM partitioning_locks WHERE id = 1; -ERROR: relation "partitioning_locks" does not exist -LINE 1: DELETE FROM partitioning_locks WHERE id = 1; - ^ -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test locks on INSERT/SELECT -CREATE TABLE partitioning_locks_for_select(id int, ref_id int, time date); -SELECT create_distributed_table('partitioning_locks_for_select', 'id'); - create_distributed_table --------------------------- - -(1 row) - -BEGIN; -INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select; -ERROR: relation "partitioning_locks" does not exist -LINE 1: INSERT INTO partitioning_locks SELECT * FROM partitioning_lo... - ^ -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test locks on coordinator INSERT/SELECT -BEGIN; -INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select LIMIT 5; -ERROR: relation "partitioning_locks" does not exist -LINE 1: INSERT INTO partitioning_locks SELECT * FROM partitioning_lo... 
- ^ -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test locks on master_modify_multiple_shards -BEGIN; -SELECT master_modify_multiple_shards('UPDATE partitioning_locks SET time = ''2009-03-01'''); -ERROR: relation "partitioning_locks" does not exist -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test locks on DDL -BEGIN; -ALTER TABLE partitioning_locks ADD COLUMN new_column int; -ERROR: relation "partitioning_locks" does not exist -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test locks on TRUNCATE -BEGIN; -TRUNCATE partitioning_locks; -ERROR: relation "partitioning_locks" does not exist -SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test shard resource locks with master_modify_multiple_shards -BEGIN; -SELECT master_modify_multiple_shards('UPDATE partitioning_locks_2009 SET time = ''2009-03-01'''); -ERROR: relation "partitioning_locks_2009" does not exist --- see the locks on parent table -SELECT - logicalrelid, - locktype, - mode -FROM - pg_locks AS l JOIN pg_dist_shard AS s -ON - l.objid = s.shardid -WHERE - logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND - pid = 
pg_backend_pid() -ORDER BY - 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test shard resource locks with TRUNCATE -BEGIN; -TRUNCATE partitioning_locks_2009; -ERROR: relation "partitioning_locks_2009" does not exist --- see the locks on parent table -SELECT - logicalrelid, - locktype, - mode -FROM - pg_locks AS l JOIN pg_dist_shard AS s -ON - l.objid = s.shardid -WHERE - logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND - pid = pg_backend_pid() -ORDER BY - 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test shard resource locks with INSERT/SELECT -BEGIN; -INSERT INTO partitioning_locks_2009 SELECT * FROM partitioning_locks WHERE time >= '2009-01-01' AND time < '2010-01-01'; -ERROR: relation "partitioning_locks_2009" does not exist -LINE 1: INSERT INTO partitioning_locks_2009 SELECT * FROM partitioni... - ^ --- see the locks on parent table -SELECT - logicalrelid, - locktype, - mode -FROM - pg_locks AS l JOIN pg_dist_shard AS s -ON - l.objid = s.shardid -WHERE - logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND - pid = pg_backend_pid() -ORDER BY - 1, 2, 3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- test partition-wise join -CREATE TABLE partitioning_hash_join_test(id int, subid int) PARTITION BY HASH(subid); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...LE partitioning_hash_join_test(id int, subid int) PARTITION ... - ^ -CREATE TABLE partitioning_hash_join_test_0 PARTITION OF partitioning_hash_join_test FOR VALUES WITH (MODULUS 3, REMAINDER 0); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_hash_join_test_0 PARTITION OF part... 
- ^ -CREATE TABLE partitioning_hash_join_test_1 PARTITION OF partitioning_hash_join_test FOR VALUES WITH (MODULUS 3, REMAINDER 1); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_hash_join_test_1 PARTITION OF part... - ^ -CREATE TABLE partitioning_hash_join_test_2 PARTITION OF partitioning_hash_join_test FOR VALUES WITH (MODULUS 3, REMAINDER 2); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_hash_join_test_2 PARTITION OF part... - ^ -SELECT create_distributed_table('partitioning_hash_join_test', 'id'); -ERROR: relation "partitioning_hash_join_test" does not exist -LINE 1: SELECT create_distributed_table('partitioning_hash_join_test... - ^ -SELECT success FROM run_command_on_workers('alter system set enable_mergejoin to off'); - success ---------- - t - t -(2 rows) - -SELECT success FROM run_command_on_workers('alter system set enable_nestloop to off'); - success ---------- - t - t -(2 rows) - -SELECT success FROM run_command_on_workers('alter system set enable_indexscan to off'); - success ---------- - t - t -(2 rows) - -SELECT success FROM run_command_on_workers('alter system set enable_indexonlyscan to off'); - success ---------- - t - t -(2 rows) - -SELECT success FROM run_command_on_workers('alter system set enable_partitionwise_join to off'); - success ---------- - f - f -(2 rows) - -SELECT success FROM run_command_on_workers('select pg_reload_conf()'); - success ---------- - t - t -(2 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, subid); -ERROR: relation "partitioning_hash_test" does not exist -LINE 2: SELECT * FROM partitioning_hash_test JOIN partitioning_hash_... 
- ^ --- set partition-wise join on -SELECT success FROM run_command_on_workers('alter system set enable_partitionwise_join to on'); - success ---------- - f - f -(2 rows) - -SELECT success FROM run_command_on_workers('select pg_reload_conf()'); - success ---------- - t - t -(2 rows) - -SET enable_partitionwise_join TO on; -ERROR: unrecognized configuration parameter "enable_partitionwise_join" -EXPLAIN (COSTS OFF) -SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id, subid); -ERROR: relation "partitioning_hash_test" does not exist -LINE 2: SELECT * FROM partitioning_hash_test JOIN partitioning_hash_... - ^ --- note that partition-wise joins only work when partition key is in the join --- following join does not have that, therefore join will not be pushed down to --- partitions -EXPLAIN (COSTS OFF) -SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id); -ERROR: relation "partitioning_hash_test" does not exist -LINE 2: SELECT * FROM partitioning_hash_test JOIN partitioning_hash_... 
- ^ --- reset partition-wise join -SELECT success FROM run_command_on_workers('alter system reset enable_partitionwise_join'); - success ---------- - f - f -(2 rows) - -SELECT success FROM run_command_on_workers('alter system reset enable_mergejoin'); - success ---------- - t - t -(2 rows) - -SELECT success FROM run_command_on_workers('alter system reset enable_nestloop'); - success ---------- - t - t -(2 rows) - -SELECT success FROM run_command_on_workers('alter system reset enable_indexscan'); - success ---------- - t - t -(2 rows) - -SELECT success FROM run_command_on_workers('alter system reset enable_indexonlyscan'); - success ---------- - t - t -(2 rows) - -SELECT success FROM run_command_on_workers('select pg_reload_conf()'); - success ---------- - t - t -(2 rows) - -RESET enable_partitionwise_join; -ERROR: unrecognized configuration parameter "enable_partitionwise_join" -DROP TABLE -IF EXISTS - partitioning_test_2009, - partitioned_events_table, - partitioned_users_table, - list_partitioned_events_table, - multi_column_partitioning, - partitioning_locks, - partitioning_locks_for_select; -NOTICE: table "partitioning_test_2009" does not exist, skipping -NOTICE: table "partitioned_events_table" does not exist, skipping -NOTICE: table "partitioned_users_table" does not exist, skipping -NOTICE: table "list_partitioned_events_table" does not exist, skipping -NOTICE: table "multi_column_partitioning" does not exist, skipping -NOTICE: table "partitioning_locks" does not exist, skipping --- make sure we can create a partitioned table with streaming replication -SET citus.replication_model TO 'streaming'; -CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test(id int, time date) PARTITION ... 
- ^ -CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2009 PARTITION OF partitionin... - ^ -SELECT create_distributed_table('partitioning_test', 'id'); -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test', 'id'); - ^ -DROP TABLE partitioning_test; -ERROR: table "partitioning_test" does not exist --- make sure we can attach partitions to a distributed table in a schema -CREATE SCHEMA partitioning_schema; -CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...titioning_schema."schema-test"(id int, time date) PARTITION ... - ^ -SELECT create_distributed_table('partitioning_schema."schema-test"', 'id'); -ERROR: relation "partitioning_schema.schema-test" does not exist -LINE 1: SELECT create_distributed_table('partitioning_schema."schema... - ^ -CREATE TABLE partitioning_schema."schema-test_2009"(id int, time date); -ALTER TABLE partitioning_schema."schema-test" ATTACH PARTITION partitioning_schema."schema-test_2009" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE partitioning_schema."schema-test" ATTACH PARTITI... - ^ --- attached partition is distributed as well -SELECT - logicalrelid -FROM - pg_dist_partition -WHERE - logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) -ORDER BY 1; -ERROR: relation "partitioning_schema.schema-test" does not exist -LINE 6: logicalrelid IN ('partitioning_schema."schema-test"'::regcl... 
- ^ -SELECT - logicalrelid, count(*) -FROM - pg_dist_shard -WHERE - logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) -GROUP BY - logicalrelid -ORDER BY - 1,2; -ERROR: relation "partitioning_schema.schema-test" does not exist -LINE 6: logicalrelid IN ('partitioning_schema."schema-test"'::re... - ^ -DROP TABLE partitioning_schema."schema-test"; -ERROR: table "schema-test" does not exist --- make sure we can create partition of a distributed table in a schema -CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...titioning_schema."schema-test"(id int, time date) PARTITION ... - ^ -SELECT create_distributed_table('partitioning_schema."schema-test"', 'id'); -ERROR: relation "partitioning_schema.schema-test" does not exist -LINE 1: SELECT create_distributed_table('partitioning_schema."schema... - ^ -CREATE TABLE partitioning_schema."schema-test_2009" PARTITION OF partitioning_schema."schema-test" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...EATE TABLE partitioning_schema."schema-test_2009" PARTITION ... - ^ --- newly created partition is distributed as well -SELECT - logicalrelid -FROM - pg_dist_partition -WHERE - logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) -ORDER BY 1; -ERROR: relation "partitioning_schema.schema-test" does not exist -LINE 6: logicalrelid IN ('partitioning_schema."schema-test"'::regcl... - ^ -SELECT - logicalrelid, count(*) -FROM - pg_dist_shard -WHERE - logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) -GROUP BY - logicalrelid -ORDER BY - 1,2; -ERROR: relation "partitioning_schema.schema-test" does not exist -LINE 6: logicalrelid IN ('partitioning_schema."schema-test"'::re... 
- ^ -DROP TABLE partitioning_schema."schema-test"; -ERROR: table "schema-test" does not exist --- make sure creating partitioned tables works while search_path is set -CREATE TABLE partitioning_schema."schema-test"(id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...titioning_schema."schema-test"(id int, time date) PARTITION ... - ^ -SET search_path = partitioning_schema; -SELECT create_distributed_table('"schema-test"', 'id'); -ERROR: relation "schema-test" does not exist -LINE 1: SELECT create_distributed_table('"schema-test"', 'id'); - ^ -CREATE TABLE partitioning_schema."schema-test_2009" PARTITION OF "schema-test" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...EATE TABLE partitioning_schema."schema-test_2009" PARTITION ... - ^ --- newly created partition is distributed as well -SELECT - logicalrelid -FROM - pg_dist_partition -WHERE - logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) -ORDER BY 1; -ERROR: relation "partitioning_schema.schema-test" does not exist -LINE 6: logicalrelid IN ('partitioning_schema."schema-test"'::regcl... - ^ -SELECT - logicalrelid, count(*) -FROM - pg_dist_shard -WHERE - logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass) -GROUP BY - logicalrelid -ORDER BY - 1,2; -ERROR: relation "partitioning_schema.schema-test" does not exist -LINE 6: logicalrelid IN ('partitioning_schema."schema-test"'::re... 
- ^ -DROP SCHEMA partitioning_schema CASCADE; -NOTICE: drop cascades to table "schema-test_2009" -RESET SEARCH_PATH; -DROP TABLE IF EXISTS - partitioning_hash_test, - partitioning_hash_join_test, - partitioning_test_failure, - non_distributed_partitioned_table, - partitioning_test_foreign_key; -NOTICE: table "partitioning_hash_test" does not exist, skipping -NOTICE: table "partitioning_hash_join_test" does not exist, skipping -NOTICE: table "partitioning_test_failure" does not exist, skipping -NOTICE: table "non_distributed_partitioned_table" does not exist, skipping diff --git a/src/test/regress/expected/multi_partitioning_utils_1.out b/src/test/regress/expected/multi_partitioning_utils_1.out deleted file mode 100644 index 8f6768dc9..000000000 --- a/src/test/regress/expected/multi_partitioning_utils_1.out +++ /dev/null @@ -1,372 +0,0 @@ --- This test has different output per major version -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int as server_major_version; - server_major_version ----------------------- - 9 -(1 row) - --- =================================================================== --- create test functions --- =================================================================== -CREATE FUNCTION generate_alter_table_detach_partition_command(regclass) - RETURNS text - AS 'citus' - LANGUAGE C STRICT; -CREATE FUNCTION generate_alter_table_attach_partition_command(regclass) - RETURNS text - AS 'citus' - LANGUAGE C STRICT; -CREATE FUNCTION generate_partition_information(regclass) - RETURNS text - AS 'citus' - LANGUAGE C STRICT; -CREATE FUNCTION print_partitions(regclass) - RETURNS text - AS 'citus' - LANGUAGE C STRICT; -CREATE FUNCTION table_inherits(regclass) - RETURNS bool - AS 'citus' - LANGUAGE C STRICT; -CREATE FUNCTION table_inherited(regclass) - RETURNS bool - AS 'citus' - LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION detach_and_attach_partition(partition_name regclass, parent_table_name regclass) -RETURNS void LANGUAGE 
plpgsql VOLATILE -AS $function$ -DECLARE - detach_partition_command text := ''; - attach_partition_command text := ''; - command_result text := ''; - -BEGIN - -- first generate the command - SELECT public.generate_alter_table_attach_partition_command(partition_name) INTO attach_partition_command; - - -- now genereate the detach command - SELECT public.generate_alter_table_detach_partition_command(partition_name) INTO detach_partition_command; - - -- later detach the same partition - EXECUTE detach_partition_command; - - -- not attach it again - EXECUTE attach_partition_command; -END; -$function$; -CREATE OR REPLACE FUNCTION drop_and_recreate_partitioned_table(parent_table_name regclass) -RETURNS void LANGUAGE plpgsql VOLATILE -AS $function$ -DECLARE - command text := ''; - -BEGIN - -- first generate the command - CREATE TABLE partitioned_table_create_commands AS SELECT master_get_table_ddl_events(parent_table_name::text); - - -- later detach the same partition - EXECUTE 'DROP TABLE ' || parent_table_name::text || ';'; - - FOR command IN SELECT * FROM partitioned_table_create_commands - LOOP - -- can do some processing here - EXECUTE command; - END LOOP; - - DROP TABLE partitioned_table_create_commands; - -END; -$function$; --- create a partitioned table -CREATE TABLE date_partitioned_table(id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...E TABLE date_partitioned_table(id int, time date) PARTITION ... - ^ --- we should be able to get the partitioning information even if there are no partitions -SELECT generate_partition_information('date_partitioned_table'); -ERROR: relation "date_partitioned_table" does not exist -LINE 1: SELECT generate_partition_information('date_partitioned_tabl... 
- ^ --- we should be able to drop and re-create the partitioned table using the command that Citus generate -SELECT drop_and_recreate_partitioned_table('date_partitioned_table'); -ERROR: relation "date_partitioned_table" does not exist -LINE 1: SELECT drop_and_recreate_partitioned_table('date_partitioned... - ^ --- we should also be able to see the PARTITION BY ... for the parent table -SELECT master_get_table_ddl_events('date_partitioned_table'); -ERROR: relation "date_partitioned_table" does not exist --- now create the partitions -CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE date_partition_2006 PARTITION OF date_partition... - ^ -CREATE TABLE date_partition_2007 PARTITION OF date_partitioned_table FOR VALUES FROM ('2007-01-01') TO ('2008-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE date_partition_2007 PARTITION OF date_partition... - ^ --- we should be able to get the partitioning information after the partitions are created -SELECT generate_partition_information('date_partitioned_table'); -ERROR: relation "date_partitioned_table" does not exist -LINE 1: SELECT generate_partition_information('date_partitioned_tabl... - ^ --- lets get the attach partition commands -SELECT generate_alter_table_attach_partition_command('date_partition_2006'); -ERROR: relation "date_partition_2006" does not exist -LINE 1: ...ECT generate_alter_table_attach_partition_command('date_part... - ^ -SELECT generate_alter_table_attach_partition_command('date_partition_2007'); -ERROR: relation "date_partition_2007" does not exist -LINE 1: ...ECT generate_alter_table_attach_partition_command('date_part... 
- ^ --- detach and attach the partition by the command generated by us -\d+ date_partitioned_table -SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_table'); -ERROR: relation "date_partition_2007" does not exist -LINE 1: SELECT detach_and_attach_partition('date_partition_2007', 'd... - ^ --- check that both partitions are visiable -\d+ date_partitioned_table --- make sure that inter shard commands work as expected --- assume that the shardId is 100 -CREATE TABLE date_partitioned_table_100 (id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...LE date_partitioned_table_100 (id int, time date) PARTITION ... - ^ -CREATE TABLE date_partition_2007_100 (id int, time date ); --- now create the partitioning hierarcy -SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', - referenced_shard:=100, referenced_schema_name:='public', - command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' ); -ERROR: syntax error at or near "ATTACH" -LINE 1: SELECT worker_apply_inter_shard_ddl_command(referencing_shar... 
- ^ --- the hierarcy is successfully created -\d+ date_partitioned_table_100 --- Citus can also get the DDL events for the partitions as regular tables -SELECT master_get_table_ddl_events('date_partition_2007_100'); - master_get_table_ddl_events ------------------------------------------------------------------------ - CREATE TABLE public.date_partition_2007_100 (id integer, "time" date) - ALTER TABLE public.date_partition_2007_100 OWNER TO postgres -(2 rows) - --- now break the partitioning hierarcy -SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', - referenced_shard:=100, referenced_schema_name:='public', - command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' ); -ERROR: syntax error at or near "DETACH" -LINE 1: SELECT worker_apply_inter_shard_ddl_command(referencing_shar... - ^ --- the hierarcy is successfully broken -\d+ date_partitioned_table_100 --- now lets have some more complex partitioning hierarcies with --- tables on different schemas and constraints on the tables -CREATE SCHEMA partition_parent_schema; -CREATE TABLE partition_parent_schema.parent_table (id int NOT NULL, time date DEFAULT now()) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ..._table (id int NOT NULL, time date DEFAULT now()) PARTITION ... - ^ -CREATE SCHEMA partition_child_1_schema; -CREATE TABLE partition_child_1_schema.child_1 (id int NOT NULL, time date ); -CREATE SCHEMA partition_child_2_schema; -CREATE TABLE partition_child_2_schema.child_2 (id int NOT NULL, time date ); --- we should be able to get the partitioning information even if there are no partitions -SELECT generate_partition_information('partition_parent_schema.parent_table'); -ERROR: relation "partition_parent_schema.parent_table" does not exist -LINE 1: SELECT generate_partition_information('partition_parent_sche... 
- ^ --- we should be able to drop and re-create the partitioned table using the command that Citus generate -SELECT drop_and_recreate_partitioned_table('partition_parent_schema.parent_table'); -ERROR: relation "partition_parent_schema.parent_table" does not exist -LINE 1: SELECT drop_and_recreate_partitioned_table('partition_parent... - ^ -ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('2009-01-01') TO ('2010-01-02'); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE partition_parent_schema.parent_table ATTACH PART... - ^ -SET search_path = 'partition_parent_schema'; -ALTER TABLE parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE parent_table ATTACH PARTITION partition_child_2... - ^ -SELECT public.generate_partition_information('parent_table'); -ERROR: relation "parent_table" does not exist -LINE 1: SELECT public.generate_partition_information('parent_table')... - ^ --- lets get the attach partition commands -SELECT public.generate_alter_table_attach_partition_command('partition_child_1_schema.child_1'); - generate_alter_table_attach_partition_command ------------------------------------------------ - -(1 row) - -SET search_path = 'partition_child_2_schema'; -SELECT public.generate_alter_table_attach_partition_command('child_2'); - generate_alter_table_attach_partition_command ------------------------------------------------ - -(1 row) - -SET search_path = 'partition_parent_schema'; --- detach and attach the partition by the command generated by us -\d+ parent_table -SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'parent_table'); -ERROR: relation "parent_table" does not exist -LINE 1: ...ach_partition('partition_child_1_schema.child_1', 'parent_ta... 
- ^ --- check that both partitions are visiable -\d+ parent_table --- some very simple checks that should error out -SELECT public.generate_alter_table_attach_partition_command('parent_table'); -ERROR: relation "parent_table" does not exist -LINE 1: ...lic.generate_alter_table_attach_partition_command('parent_ta... - ^ -SELECT public.generate_partition_information('partition_child_1_schema.child_1'); - generate_partition_information --------------------------------- - -(1 row) - -SELECT public.print_partitions('partition_child_1_schema.child_1'); - print_partitions ------------------- - -(1 row) - --- now pring the partitions -SELECT public.print_partitions('parent_table'); -ERROR: relation "parent_table" does not exist -LINE 1: SELECT public.print_partitions('parent_table'); - ^ -SET search_path = 'public'; --- test multi column / expression partitioning with UNBOUNDED ranges -CREATE OR REPLACE FUNCTION some_function(input_val text) -RETURNS text LANGUAGE plpgsql IMMUTABLE -AS $function$ -BEGIN - return reverse(input_val); -END; -$function$; -CREATE TABLE multi_column_partitioned ( - a int, - b int, - c text - ) PARTITION BY RANGE (a, (a+b+1), some_function(upper(c))); -ERROR: syntax error at or near "PARTITION" -LINE 5: ) PARTITION BY RANGE (a, (a+b+1), some_function(upper(c))); - ^ -CREATE TABLE multi_column_partition_1( - a int, - b int, - c text -); -CREATE TABLE multi_column_partition_2( - a int, - b int, - c text -); --- partitioning information -SELECT generate_partition_information('multi_column_partitioned'); -ERROR: relation "multi_column_partitioned" does not exist -LINE 1: SELECT generate_partition_information('multi_column_partitio... 
- ^ -SELECT master_get_table_ddl_events('multi_column_partitioned'); -ERROR: relation "multi_column_partitioned" does not exist -SELECT drop_and_recreate_partitioned_table('multi_column_partitioned'); -ERROR: relation "multi_column_partitioned" does not exist -LINE 1: SELECT drop_and_recreate_partitioned_table('multi_column_par... - ^ --- partitions and their ranges -ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250'); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_... - ^ -SELECT generate_alter_table_attach_partition_command('multi_column_partition_1'); - generate_alter_table_attach_partition_command ------------------------------------------------ - -(1 row) - -ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_... - ^ -SELECT generate_alter_table_attach_partition_command('multi_column_partition_2'); - generate_alter_table_attach_partition_command ------------------------------------------------ - -(1 row) - -SELECT generate_alter_table_detach_partition_command('multi_column_partition_2'); - generate_alter_table_detach_partition_command ------------------------------------------------ - -(1 row) - --- finally a test with LIST partitioning -CREATE TABLE list_partitioned (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION BY LIST (col1) ; -ERROR: syntax error at or near "PARTITION" -LINE 1: ...ed (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION ... 
- ^ -SELECT generate_partition_information('list_partitioned'); -ERROR: relation "list_partitioned" does not exist -LINE 1: SELECT generate_partition_information('list_partitioned'); - ^ -SELECT master_get_table_ddl_events('list_partitioned'); -ERROR: relation "list_partitioned" does not exist -SELECT drop_and_recreate_partitioned_table('list_partitioned'); -ERROR: relation "list_partitioned" does not exist -LINE 1: SELECT drop_and_recreate_partitioned_table('list_partitioned... - ^ -CREATE TABLE list_partitioned_1 PARTITION OF list_partitioned FOR VALUES IN (100, 101, 102, 103, 104); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE list_partitioned_1 PARTITION OF list_partitione... - ^ -SELECT generate_alter_table_attach_partition_command('list_partitioned_1'); -ERROR: relation "list_partitioned_1" does not exist -LINE 1: ...ECT generate_alter_table_attach_partition_command('list_part... - ^ --- also differentiate partitions and inhereted tables -CREATE TABLE cities ( - name text, - population float, - altitude int -- in feet -); -CREATE TABLE capitals ( - state char(2) -) INHERITS (cities); --- returns true since capitals inherits from cities -SELECT table_inherits('capitals'); - table_inherits ----------------- - t -(1 row) - --- although date_partition_2006 inherits from its parent --- returns false since the hierarcy is formed via partitioning -SELECT table_inherits('date_partition_2006'); -ERROR: relation "date_partition_2006" does not exist -LINE 1: SELECT table_inherits('date_partition_2006'); - ^ --- returns true since cities inherited by capitals -SELECT table_inherited('cities'); - table_inherited ------------------ - t -(1 row) - --- although date_partitioned_table inherited by its partitions --- returns false since the hierarcy is formed via partitioning -SELECT table_inherited('date_partitioned_table'); -ERROR: relation "date_partitioned_table" does not exist -LINE 1: SELECT table_inherited('date_partitioned_table'); - ^ --- also 
these are not supported -SELECT master_get_table_ddl_events('capitals'); -ERROR: capitals is not a regular, foreign or partitioned table -SELECT master_get_table_ddl_events('cities'); -ERROR: cities is not a regular, foreign or partitioned table --- dropping parents frop the partitions -DROP TABLE date_partitioned_table, multi_column_partitioned, list_partitioned, partition_parent_schema.parent_table, cities, capitals; -ERROR: table "date_partitioned_table" does not exist diff --git a/src/test/regress/expected/multi_recursive_subquery_partitioning_0.out b/src/test/regress/expected/multi_recursive_subquery_partitioning_0.out deleted file mode 100644 index 3e3771511..000000000 --- a/src/test/regress/expected/multi_recursive_subquery_partitioning_0.out +++ /dev/null @@ -1,246 +0,0 @@ --- =================================================================== --- test recursive planning functionality on partitioned tables --- =================================================================== -CREATE SCHEMA subquery_and_partitioning; -SET search_path TO subquery_and_partitioning, public; -CREATE TABLE users_table_local AS SELECT * FROM users_table; -CREATE TABLE events_table_local AS SELECT * FROM events_table; -CREATE TABLE partitioning_test(id int, value_1 int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...partitioning_test(id int, value_1 int, time date) PARTITION ... - ^ - --- create its partitions -CREATE TABLE partitioning_test_2017 PARTITION OF partitioning_test FOR VALUES FROM ('2017-01-01') TO ('2018-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2017 PARTITION OF partitionin... - ^ -CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2010 PARTITION OF partitionin... 
- ^ --- load some data and distribute tables -INSERT INTO partitioning_test VALUES (1, 1, '2017-11-23'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES (1, 1, '2017-11-23'); - ^ -INSERT INTO partitioning_test VALUES (2, 1, '2010-07-07'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES (2, 1, '2010-07-07'); - ^ -INSERT INTO partitioning_test_2017 VALUES (3, 3, '2017-11-22'); -ERROR: relation "partitioning_test_2017" does not exist -LINE 1: INSERT INTO partitioning_test_2017 VALUES (3, 3, '2017-11-22... - ^ -INSERT INTO partitioning_test_2010 VALUES (4, 4, '2010-03-03'); -ERROR: relation "partitioning_test_2010" does not exist -LINE 1: INSERT INTO partitioning_test_2010 VALUES (4, 4, '2010-03-03... - ^ --- distribute partitioned table -SET citus.shard_replication_factor TO 1; -SELECT create_distributed_table('partitioning_test', 'id'); -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test', 'id'); - ^ -SET client_min_messages TO DEBUG1; --- subplan for partitioned tables -SELECT - id -FROM - (SELECT - DISTINCT partitioning_test.id - FROM - partitioning_test - LIMIT 5 - ) as foo - ORDER BY 1 DESC; -ERROR: relation "partitioning_test" does not exist -LINE 7: partitioning_test - ^ --- final query is router on partitioned tables -SELECT - * -FROM - (SELECT - DISTINCT partitioning_test.id - FROM - partitioning_test - LIMIT 5 - ) as foo, - (SELECT - DISTINCT partitioning_test.time - FROM - partitioning_test - LIMIT 5 - ) as bar - WHERE foo.id = date_part('day', bar.time) - ORDER BY 2 DESC, 1; -ERROR: relation "partitioning_test" does not exist -LINE 7: partitioning_test - ^ --- final query is real-time -SELECT - * -FROM - (SELECT - DISTINCT partitioning_test.time - FROM - partitioning_test - ORDER BY 1 DESC - LIMIT 5 - ) as foo, - ( - SELECT - DISTINCT partitioning_test.id - FROM - partitioning_test - ) as bar - 
WHERE date_part('day', foo.time) = bar.id - ORDER BY 2 DESC, 1 DESC - LIMIT 3; -ERROR: relation "partitioning_test" does not exist -LINE 7: partitioning_test - ^ --- final query is real-time that is joined with partitioned table -SELECT - * -FROM - (SELECT - DISTINCT partitioning_test.time - FROM - partitioning_test - ORDER BY 1 DESC - LIMIT 5 - ) as foo, - ( - SELECT - DISTINCT partitioning_test.id - FROM - partitioning_test - ) as bar, - partitioning_test - WHERE date_part('day', foo.time) = bar.id AND partitioning_test.id = bar.id - ORDER BY 2 DESC, 1 DESC - LIMIT 3; -ERROR: relation "partitioning_test" does not exist -LINE 7: partitioning_test - ^ --- subquery in WHERE clause -SELECT DISTINCT id -FROM partitioning_test -WHERE - id IN (SELECT DISTINCT date_part('day', time) FROM partitioning_test); -ERROR: relation "partitioning_test" does not exist -LINE 2: FROM partitioning_test - ^ --- repartition subquery -SET citus.enable_repartition_joins to ON; -SELECT - count(*) -FROM -( - SELECT DISTINCT p1.value_1 FROM partitioning_test as p1, partitioning_test as p2 WHERE p1.id = p2.value_1 -) as foo, -( - SELECT user_id FROM users_table -) as bar -WHERE foo.value_1 = bar.user_id; -ERROR: relation "partitioning_test" does not exist -LINE 5: SELECT DISTINCT p1.value_1 FROM partitioning_test as p1, pa... 
- ^ -SET citus.enable_repartition_joins to OFF; --- subquery, cte, view and non-partitioned tables -CREATE VIEW subquery_and_ctes AS -SELECT - * -FROM -( - WITH cte AS ( - WITH local_cte AS ( - SELECT * FROM users_table_local - ), - dist_cte AS ( - SELECT - user_id - FROM - events_table, - (SELECT DISTINCT value_1 FROM partitioning_test OFFSET 0) as foo - WHERE - events_table.user_id = foo.value_1 AND - events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3) - ) - SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id -) -SELECT - count(*) as cnt -FROM - cte, - (SELECT - DISTINCT events_table.user_id - FROM - partitioning_test, events_table - WHERE - events_table.user_id = partitioning_test.id AND - event_type IN (1,2,3,4) - ORDER BY 1 DESC LIMIT 5 - ) as foo - WHERE foo.user_id = cte.user_id -) as foo, users_table WHERE foo.cnt > users_table.value_2; -ERROR: relation "partitioning_test" does not exist -LINE 15: (SELECT DISTINCT value_1 FROM partitioning_test OFFSET 0)... 
- ^ -SELECT * FROM subquery_and_ctes -ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC -LIMIT 5; -ERROR: relation "subquery_and_ctes" does not exist -LINE 1: SELECT * FROM subquery_and_ctes - ^ --- deep subquery, partitioned and non-partitioned tables together -SELECT count(*) -FROM -( - SELECT avg(min) FROM - ( - SELECT min(partitioning_test.value_1) FROM - ( - SELECT avg(event_type) as avg_ev_type FROM - ( - SELECT - max(value_1) as mx_val_1 - FROM ( - SELECT - avg(event_type) as avg - FROM - ( - SELECT - cnt - FROM - (SELECT count(*) as cnt, value_1 FROM partitioning_test GROUP BY value_1) as level_1, users_table - WHERE - users_table.user_id = level_1.cnt - ) as level_2, events_table - WHERE events_table.user_id = level_2.cnt - GROUP BY level_2.cnt - ) as level_3, users_table - WHERE user_id = level_3.avg - GROUP BY level_3.avg - ) as level_4, events_table - WHERE level_4.mx_val_1 = events_table.user_id - GROUP BY level_4.mx_val_1 - ) as level_5, partitioning_test - WHERE - level_5.avg_ev_type = partitioning_test.id - GROUP BY - level_5.avg_ev_type - ) as level_6, users_table WHERE users_table.user_id = level_6.min - GROUP BY users_table.value_1 - ) as bar; -ERROR: relation "partitioning_test" does not exist -LINE 20: (SELECT count(*) as cnt, value_1 FROM partitioning_... - ^ -SET client_min_messages TO DEFAULT; -DROP SCHEMA subquery_and_partitioning CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table users_table_local -drop cascades to table events_table_local -SET search_path TO public; diff --git a/src/test/regress/expected/multi_repartition_join_planning_0.out b/src/test/regress/expected/multi_repartition_join_planning_0.out deleted file mode 100644 index 7bddd359d..000000000 --- a/src/test/regress/expected/multi_repartition_join_planning_0.out +++ /dev/null @@ -1,208 +0,0 @@ --- --- MULTI_REPARTITION_JOIN_PLANNING --- --- Tests that cover repartition join planning. 
Note that we explicitly start a --- transaction block here so that we don't emit debug messages with changing --- transaction ids in them. Also, we set the executor type to task tracker --- executor here, as we cannot run repartition jobs with real time executor. -SET citus.next_shard_id TO 690000; -SET citus.enable_unique_job_ids TO off; --- print whether we're using version > 9 to make version-specific tests clear -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; - version_above_nine --------------------- - f -(1 row) - -BEGIN; -SET client_min_messages TO DEBUG4; -DEBUG: CommitTransactionCommand -SET citus.task_executor_type TO 'task-tracker'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand --- Debug4 log messages display jobIds within them. We explicitly set the jobId --- sequence here so that the regression output becomes independent of the number --- of jobs executed prior to running this test. --- Multi-level repartition join to verify our projection columns are correctly --- referenced and propagated across multiple repartition jobs. The test also --- validates that only the minimal necessary projection columns are transferred --- between jobs. 
-SELECT - l_partkey, o_orderkey, count(*) -FROM - lineitem, part_append, orders, customer_append -WHERE - l_orderkey = o_orderkey AND - l_partkey = p_partkey AND - c_custkey = o_custkey AND - (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND - p_size > 8 AND o_totalprice > 10.0 AND - c_acctbal < 5000.0 AND l_partkey < 1000 -GROUP BY - l_partkey, o_orderkey -ORDER BY - l_partkey, o_orderkey; -DEBUG: StartTransactionCommand -DEBUG: join prunable for intervals [1,5986] and [8997,14947] -DEBUG: join prunable for intervals [8997,14947] and [1,5986] -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290002 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290003 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: join prunable for intervals [1,1000] and [6001,7000] -DEBUG: join prunable for intervals [6001,7000] and [1,1000] -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT "pg_merge_job_0001.task_000003".intermediate_column_1_0, "pg_merge_job_0001.task_000003".intermediate_column_1_1, "pg_merge_job_0001.task_000003".intermediate_column_1_2, "pg_merge_job_0001.task_000003".intermediate_column_1_3, "pg_merge_job_0001.task_000003".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000003 
"pg_merge_job_0001.task_000003" JOIN part_append_290005 part_append ON (("pg_merge_job_0001.task_000003".intermediate_column_1_0 OPERATOR(pg_catalog.=) part_append.p_partkey))) WHERE (part_append.p_size OPERATOR(pg_catalog.>) 8)" -DEBUG: generated sql query for task 4 -DETAIL: query string: "SELECT "pg_merge_job_0001.task_000006".intermediate_column_1_0, "pg_merge_job_0001.task_000006".intermediate_column_1_1, "pg_merge_job_0001.task_000006".intermediate_column_1_2, "pg_merge_job_0001.task_000006".intermediate_column_1_3, "pg_merge_job_0001.task_000006".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000006 "pg_merge_job_0001.task_000006" JOIN part_append_280002 part_append ON (("pg_merge_job_0001.task_000006".intermediate_column_1_0 OPERATOR(pg_catalog.=) part_append.p_partkey))) WHERE (part_append.p_size OPERATOR(pg_catalog.>) 8)" -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 3 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 4 to node localhost:57638 -DEBUG: join prunable for intervals [1,1000] and [1001,2000] -DEBUG: join prunable for intervals [1,1000] and [6001,7000] -DEBUG: join prunable for intervals [1001,2000] and [1,1000] -DEBUG: join prunable for intervals [1001,2000] and [6001,7000] -DEBUG: join prunable for intervals [6001,7000] and [1,1000] -DEBUG: join prunable for intervals [6001,7000] and [1001,2000] -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN customer_append_290004 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE 
((("pg_merge_job_0002.task_000005".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1" -DEBUG: generated sql query for task 4 -DETAIL: query string: "SELECT "pg_merge_job_0002.task_000008".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000008".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000008 "pg_merge_job_0002.task_000008" JOIN customer_append_280001 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000008".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000008".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000008".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000008".intermediate_column_2_0, "pg_merge_job_0002.task_000008".intermediate_column_2_1" -DEBUG: generated sql query for task 6 -DETAIL: query string: "SELECT "pg_merge_job_0002.task_000011".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000011".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000011 "pg_merge_job_0002.task_000011" JOIN customer_append_280000 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000011".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000011".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000011".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000011".intermediate_column_2_0, 
"pg_merge_job_0002.task_000011".intermediate_column_2_1" -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 5 -DEBUG: pruning merge fetch taskId 3 -DETAIL: Creating dependency on merge taskId 8 -DEBUG: pruning merge fetch taskId 5 -DETAIL: Creating dependency on merge taskId 11 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: completed cleanup query for job 3 -DEBUG: completed cleanup query for job 3 -DEBUG: completed cleanup query for job 2 -DEBUG: completed cleanup query for job 2 -DEBUG: completed cleanup query for job 1 -DEBUG: completed cleanup query for job 1 -DEBUG: CommitTransactionCommand - l_partkey | o_orderkey | count ------------+------------+------- - 18 | 12005 | 1 - 79 | 5121 | 1 - 91 | 2883 | 1 - 222 | 9413 | 1 - 278 | 1287 | 1 - 309 | 2374 | 1 - 318 | 321 | 1 - 321 | 5984 | 1 - 337 | 10403 | 1 - 350 | 13698 | 1 - 358 | 4323 | 1 - 364 | 9347 | 1 - 416 | 640 | 1 - 426 | 10855 | 1 - 450 | 35 | 1 - 484 | 3843 | 1 - 504 | 14566 | 1 - 510 | 13569 | 1 - 532 | 3175 | 1 - 641 | 134 | 1 - 669 | 10944 | 1 - 716 | 2885 | 1 - 738 | 4355 | 1 - 802 | 2534 | 1 - 824 | 9287 | 1 - 864 | 3175 | 1 - 957 | 4293 | 1 - 960 | 10980 | 1 - 963 | 4580 | 1 -(29 rows) - -SELECT - l_partkey, o_orderkey, count(*) -FROM - lineitem, orders -WHERE - l_suppkey = o_shippriority AND - l_quantity < 5.0 AND o_totalprice <> 4.0 -GROUP BY - l_partkey, o_orderkey -ORDER BY - l_partkey, o_orderkey; -DEBUG: StartTransactionCommand -DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 
-DEBUG: generated sql query for task 1 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)" -DEBUG: generated sql query for task 2 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)" -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: join prunable for task partitionId 0 and 1 -DEBUG: join prunable for task partitionId 0 and 2 -DEBUG: join prunable for task partitionId 0 and 3 -DEBUG: join prunable for task partitionId 1 and 0 -DEBUG: join prunable for task partitionId 1 and 2 -DEBUG: join prunable for task partitionId 1 and 3 -DEBUG: join prunable for task partitionId 2 and 0 -DEBUG: join prunable for task partitionId 2 and 1 -DEBUG: join prunable for task partitionId 2 and 3 -DEBUG: join prunable for task partitionId 3 and 0 -DEBUG: join prunable for task partitionId 3 and 1 -DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: generated sql query for task 3 -DETAIL: query string: "SELECT "pg_merge_job_0004.task_000003".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000003 "pg_merge_job_0004.task_000003" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000003".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000003".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0" -DEBUG: generated sql query for task 6 -DETAIL: query string: "SELECT "pg_merge_job_0004.task_000006".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000006 "pg_merge_job_0004.task_000006" JOIN 
pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000006".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000006".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0" -DEBUG: generated sql query for task 9 -DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0" -DEBUG: generated sql query for task 12 -DETAIL: query string: "SELECT "pg_merge_job_0004.task_000012".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000012 "pg_merge_job_0004.task_000012" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000012".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000012".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0" -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 2 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 5 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 7 
-DETAIL: Creating dependency on merge taskId 9 -DEBUG: pruning merge fetch taskId 8 -DETAIL: Creating dependency on merge taskId 9 -DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: pruning merge fetch taskId 11 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 12 to node localhost:57638 -DEBUG: completed cleanup query for job 6 -DEBUG: completed cleanup query for job 6 -DEBUG: completed cleanup query for job 4 -DEBUG: completed cleanup query for job 4 -DEBUG: completed cleanup query for job 5 -DEBUG: completed cleanup query for job 5 -DEBUG: CommitTransactionCommand - l_partkey | o_orderkey | count ------------+------------+------- -(0 rows) - --- Reset client logging level to its previous value -SET client_min_messages TO NOTICE; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -COMMIT; diff --git a/src/test/regress/expected/multi_repartition_join_task_assignment_0.out b/src/test/regress/expected/multi_repartition_join_task_assignment_0.out deleted file mode 100644 index aa9b467b7..000000000 --- a/src/test/regress/expected/multi_repartition_join_task_assignment_0.out +++ /dev/null @@ -1,141 +0,0 @@ --- --- MULTI_REPARTITION_JOIN_TASK_ASSIGNMENT --- --- Tests which cover task assignment for MapMerge jobs for single range repartition --- and dual hash repartition joins. The tests also cover task assignment propagation --- from a sql task to its depended tasks. Note that we set the executor type to task --- tracker executor here, as we cannot run repartition jobs with real time executor. 
-SET citus.next_shard_id TO 710000; --- print whether we're using version > 9 to make version-specific tests clear -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; - version_above_nine --------------------- - f -(1 row) - -BEGIN; -SET client_min_messages TO DEBUG3; -DEBUG: CommitTransactionCommand -SET citus.task_executor_type TO 'task-tracker'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand --- Single range repartition join to test anchor-shard based task assignment and --- assignment propagation to merge and data-fetch tasks. -SELECT - count(*) -FROM - orders, customer_append -WHERE - o_custkey = c_custkey; -DEBUG: StartTransactionCommand -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: join prunable for intervals [1,1000] and [1001,2000] -DEBUG: join prunable for intervals [1,1000] and [6001,7000] -DEBUG: join prunable for intervals [1001,2000] and [1,1000] -DEBUG: join prunable for intervals [1001,2000] and [6001,7000] -DEBUG: join prunable for intervals [6001,7000] and [1,1000] -DEBUG: join prunable for intervals [6001,7000] and [1001,2000] -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 3 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 5 -DETAIL: Creating dependency on merge taskId 9 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: CommitTransactionCommand - count -------- - 2985 -(1 row) - --- Single range repartition join, along with a join with a small table containing --- more than one shard. This situation results in multiple sql tasks depending on --- the same merge task, and tests our constraint group creation and assignment --- propagation. 
-SELECT - count(*) -FROM - orders_reference, customer_append, lineitem -WHERE - o_custkey = c_custkey AND - o_orderkey = l_orderkey; -DEBUG: StartTransactionCommand -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: join prunable for intervals [1,5986] and [8997,14947] -DEBUG: join prunable for intervals [8997,14947] and [1,5986] -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 4 -DEBUG: pruning merge fetch taskId 3 -DETAIL: Creating dependency on merge taskId 8 -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 -DEBUG: CommitTransactionCommand - count -------- - 12000 -(1 row) - --- Dual hash repartition join which tests the separate hash repartition join --- task assignment algorithm. -SELECT - count(*) -FROM - lineitem, customer_append -WHERE - l_partkey = c_nationkey; -DEBUG: StartTransactionCommand -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: join prunable for task partitionId 0 and 1 -DEBUG: join prunable for task partitionId 0 and 2 -DEBUG: join prunable for task partitionId 0 and 3 -DEBUG: join prunable for task partitionId 1 and 0 -DEBUG: join prunable for task partitionId 1 and 2 -DEBUG: join prunable for task partitionId 1 and 3 -DEBUG: join prunable for task partitionId 2 and 0 -DEBUG: join prunable for task partitionId 2 and 1 -DEBUG: join prunable for task partitionId 2 and 3 -DEBUG: join prunable for task partitionId 3 and 0 -DEBUG: join prunable for task partitionId 3 and 1 -DEBUG: join prunable for task partitionId 3 and 2 -DEBUG: pruning merge fetch taskId 1 -DETAIL: Creating dependency on merge taskId 3 -DEBUG: pruning merge fetch taskId 2 -DETAIL: Creating dependency on 
merge taskId 4 -DEBUG: pruning merge fetch taskId 4 -DETAIL: Creating dependency on merge taskId 6 -DEBUG: pruning merge fetch taskId 5 -DETAIL: Creating dependency on merge taskId 8 -DEBUG: pruning merge fetch taskId 7 -DETAIL: Creating dependency on merge taskId 9 -DEBUG: pruning merge fetch taskId 8 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: pruning merge fetch taskId 10 -DETAIL: Creating dependency on merge taskId 12 -DEBUG: pruning merge fetch taskId 11 -DETAIL: Creating dependency on merge taskId 16 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 9 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 -DEBUG: CommitTransactionCommand - count -------- - 125 -(1 row) - --- Reset client logging level to its previous value -SET client_min_messages TO NOTICE; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -COMMIT; diff --git a/src/test/regress/expected/multi_task_assignment_policy_0.out b/src/test/regress/expected/multi_task_assignment_policy_0.out deleted file mode 100644 index 3aeb624e2..000000000 --- a/src/test/regress/expected/multi_task_assignment_policy_0.out +++ /dev/null @@ -1,289 +0,0 @@ --- --- MULTI_TASK_ASSIGNMENT --- -SET citus.next_shard_id TO 880000; --- print whether we're using version > 9 to make version-specific tests clear -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; - version_above_nine --------------------- - f -(1 row) - -SET citus.explain_distributed_queries TO off; --- Check that our policies for assigning tasks to worker nodes run as expected. --- To test this, we first create a shell table, and then manually insert shard --- and shard placement data into system catalogs. We next run Explain command, --- and check that tasks are assigned to worker nodes as expected. 
-CREATE TABLE task_assignment_test_table (test_id integer); -SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append'); - create_distributed_table --------------------------- - -(1 row) - --- Create logical shards with shardids 200, 201, and 202 -INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) - SELECT pg_class.oid, series.index, 'r', 1, 1000 - FROM pg_class, generate_series(200, 202) AS series(index) - WHERE pg_class.relname = 'task_assignment_test_table'; --- Create shard placements for shard 200 and 201 -INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) - SELECT 200, 1, 1, nodename, nodeport - FROM pg_dist_shard_placement - GROUP BY nodename, nodeport - ORDER BY nodename, nodeport ASC - LIMIT 2; -INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) - SELECT 201, 1, 1, nodename, nodeport - FROM pg_dist_shard_placement - GROUP BY nodename, nodeport - ORDER BY nodename, nodeport ASC - LIMIT 2; --- Create shard placements for shard 202 -INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) - SELECT 202, 1, 1, nodename, nodeport - FROM pg_dist_shard_placement - GROUP BY nodename, nodeport - ORDER BY nodename, nodeport DESC - LIMIT 2; --- Start transaction block to avoid auto commits. This avoids additional debug --- messages from getting printed at real transaction starts and commits. -BEGIN; --- Increase log level to see which worker nodes tasks are assigned to. Note that --- the following log messages print node name and port numbers; and node numbers --- in regression tests depend upon PG_VERSION_NUM. 
-SET client_min_messages TO DEBUG3; -DEBUG: CommitTransactionCommand --- First test the default greedy task assignment policy -SET citus.task_assignment_policy TO 'greedy'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: CommitTransactionCommand - QUERY PLAN ------------------------------------------------------------------------ - Aggregate (cost=0.00..0.00 rows=0 width=0) - -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) - explain statements for distributed queries are not enabled -(3 rows) - -EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: CommitTransactionCommand - QUERY PLAN ------------------------------------------------------------------------ - Aggregate (cost=0.00..0.00 rows=0 width=0) - -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) - explain statements for distributed queries are not enabled -(3 rows) - --- Next test the first-replica task assignment policy -SET citus.task_assignment_policy TO 'first-replica'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: CommitTransactionCommand - QUERY PLAN ------------------------------------------------------------------------ - Aggregate (cost=0.00..0.00 rows=0 
width=0) - -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) - explain statements for distributed queries are not enabled -(3 rows) - -EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: CommitTransactionCommand - QUERY PLAN ------------------------------------------------------------------------ - Aggregate (cost=0.00..0.00 rows=0 width=0) - -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) - explain statements for distributed queries are not enabled -(3 rows) - --- Finally test the round-robin task assignment policy -SET citus.task_assignment_policy TO 'round-robin'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57638 -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: CommitTransactionCommand - QUERY PLAN ------------------------------------------------------------------------ - Aggregate (cost=0.00..0.00 rows=0 width=0) - -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) - explain statements for distributed queries are not enabled -(3 rows) - -EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: assigned task 3 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57637 -DEBUG: assigned task 1 to node localhost:57638 -DEBUG: CommitTransactionCommand - QUERY PLAN ------------------------------------------------------------------------ - Aggregate (cost=0.00..0.00 rows=0 width=0) - -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) - explain statements for distributed 
queries are not enabled -(3 rows) - -EXPLAIN SELECT count(*) FROM task_assignment_test_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 2 to node localhost:57638 -DEBUG: assigned task 1 to node localhost:57637 -DEBUG: CommitTransactionCommand - QUERY PLAN ------------------------------------------------------------------------ - Aggregate (cost=0.00..0.00 rows=0 width=0) - -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) - explain statements for distributed queries are not enabled -(3 rows) - -RESET citus.task_assignment_policy; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -RESET client_min_messages; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -COMMIT; -BEGIN; -SET LOCAL client_min_messages TO DEBUG3; -DEBUG: CommitTransactionCommand -SET LOCAL citus.explain_distributed_queries TO off; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand --- Check how task_assignment_policy impact planning decisions for reference tables -CREATE TABLE task_assignment_reference_table (test_id integer); -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -SELECT create_reference_table('task_assignment_reference_table'); -DEBUG: StartTransactionCommand -DEBUG: CommitTransactionCommand - create_reference_table ------------------------- - -(1 row) - -SET LOCAL citus.task_assignment_policy TO 'greedy'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: Creating router plan -DEBUG: Plan is router executable -DEBUG: CommitTransactionCommand - QUERY PLAN --------------------------------------------------------------- - Custom Scan (Citus Router) - explain statements for distributed queries are not 
enabled -(2 rows) - -EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: Creating router plan -DEBUG: Plan is router executable -DEBUG: CommitTransactionCommand - QUERY PLAN --------------------------------------------------------------- - Custom Scan (Citus Router) - explain statements for distributed queries are not enabled -(2 rows) - -SET LOCAL citus.task_assignment_policy TO 'first-replica'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: Creating router plan -DEBUG: Plan is router executable -DEBUG: CommitTransactionCommand - QUERY PLAN --------------------------------------------------------------- - Custom Scan (Citus Router) - explain statements for distributed queries are not enabled -(2 rows) - -EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: Creating router plan -DEBUG: Plan is router executable -DEBUG: CommitTransactionCommand - QUERY PLAN --------------------------------------------------------------- - Custom Scan (Citus Router) - explain statements for distributed queries are not enabled -(2 rows) - --- here we expect debug output showing two different hosts for subsequent queries -SET LOCAL citus.task_assignment_policy TO 'round-robin'; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand -EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: assigned task 0 to node localhost:57637 -DEBUG: Creating router plan -DEBUG: Plan is router executable -DEBUG: CommitTransactionCommand - QUERY PLAN --------------------------------------------------------------- - Custom Scan (Citus Router) - explain statements for 
distributed queries are not enabled -(2 rows) - -EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: assigned task 0 to node localhost:57638 -DEBUG: Creating router plan -DEBUG: Plan is router executable -DEBUG: CommitTransactionCommand - QUERY PLAN --------------------------------------------------------------- - Custom Scan (Citus Router) - explain statements for distributed queries are not enabled -(2 rows) - -ROLLBACK; -DEBUG: StartTransactionCommand -DEBUG: ProcessUtility -DEBUG: CommitTransactionCommand diff --git a/src/test/regress/expected/relation_access_tracking_0.out b/src/test/regress/expected/relation_access_tracking_0.out deleted file mode 100644 index b9cc53355..000000000 --- a/src/test/regress/expected/relation_access_tracking_0.out +++ /dev/null @@ -1,930 +0,0 @@ ---- ---- tests around access tracking within transaction blocks ---- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 10 AS version_ten_or_above; - version_ten_or_above ----------------------- - f -(1 row) - -CREATE SCHEMA access_tracking; -SET search_path TO 'access_tracking'; -CREATE OR REPLACE FUNCTION relation_select_access_mode(relationId Oid) - RETURNS int - LANGUAGE C STABLE STRICT - AS 'citus', $$relation_select_access_mode$$; -CREATE OR REPLACE FUNCTION relation_dml_access_mode(relationId Oid) - RETURNS int - LANGUAGE C STABLE STRICT - AS 'citus', $$relation_dml_access_mode$$; -CREATE OR REPLACE FUNCTION relation_ddl_access_mode(relationId Oid) - RETURNS int - LANGUAGE C STABLE STRICT - AS 'citus', $$relation_ddl_access_mode$$; -CREATE OR REPLACE FUNCTION relation_access_mode_to_text(relationShardAccess int) -RETURNS text AS -$$ -BEGIN - IF relationShardAccess = 0 THEN - RETURN 'not_accessed'; - ELSIF relationShardAccess = 1 THEN - RETURN 'sequential_access'; - ELSE - RETURN 'parallel_access'; - END IF; -END; -$$ LANGUAGE 'plpgsql' IMMUTABLE; -CREATE VIEW relation_acesses 
AS - SELECT table_name, - relation_access_mode_to_text(relation_select_access_mode(table_name::regclass)) as select_access, - relation_access_mode_to_text(relation_dml_access_mode(table_name::regclass)) as dml_access, - relation_access_mode_to_text(relation_ddl_access_mode(table_name::regclass)) as ddl_access - FROM - ((SELECT 'table_' || i as table_name FROM generate_series(1, 7) i) UNION (SELECT 'partitioning_test') UNION (SELECT 'partitioning_test_2009') UNION (SELECT 'partitioning_test_2010')) tables; -SET citus.shard_replication_factor TO 1; -CREATE TABLE table_1 (key int, value int); -SELECT create_distributed_table('table_1', 'key'); - create_distributed_table --------------------------- - -(1 row) - -CREATE TABLE table_2 (key int, value int); -SELECT create_distributed_table('table_2', 'key'); - create_distributed_table --------------------------- - -(1 row) - -CREATE TABLE table_3 (key int, value int); -SELECT create_distributed_table('table_3', 'key'); - create_distributed_table --------------------------- - -(1 row) - -CREATE TABLE table_4 (key int, value int); -SELECT create_distributed_table('table_4', 'key'); - create_distributed_table --------------------------- - -(1 row) - -CREATE TABLE table_5 (key int, value int); -SELECT create_distributed_table('table_5', 'key'); - create_distributed_table --------------------------- - -(1 row) - -CREATE TABLE table_6 (key int, value int); -SELECT create_reference_Table('table_6'); - create_reference_table ------------------------- - -(1 row) - -INSERT INTO table_1 SELECT i, i FROM generate_series(0,100) i; -INSERT INTO table_2 SELECT i, i FROM generate_series(0,100) i; -INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i; -INSERT INTO table_4 SELECT i, i FROM generate_series(0,100) i; -INSERT INTO table_5 SELECT i, i FROM generate_series(0,100) i; -INSERT INTO table_6 SELECT i, i FROM generate_series(0,100) i; --- create_distributed_table works fine -BEGIN; - CREATE TABLE table_7 (key int, value 
int); - SELECT create_distributed_table('table_7', 'key'); - create_distributed_table --------------------------- - -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_7') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+----------------- - table_7 | not_accessed | not_accessed | parallel_access -(1 row) - -COMMIT; --- outisde the transaction blocks, the function always returns zero -SELECT count(*) FROM table_1; - count -------- - 101 -(1 row) - -SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+-------------- - table_1 | not_accessed | not_accessed | not_accessed -(1 row) - --- a very simple test that first checks sequential --- and parallel SELECTs,DMLs, and DDLs -BEGIN; - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+-------------- - table_1 | not_accessed | not_accessed | not_accessed -(1 row) - - SELECT count(*) FROM table_1 WHERE key = 1; - count -------- - 1 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-------------------+--------------+-------------- - table_1 | sequential_access | not_accessed | not_accessed -(1 row) - - SELECT count(*) FROM table_1 WHERE key = 1 OR key = 2; - count -------- - 2 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------+--------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed -(1 row) - - INSERT INTO table_1 VALUES (1,1); - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access 
-------------+-----------------+-------------------+-------------- - table_1 | parallel_access | sequential_access | not_accessed -(1 row) - - INSERT INTO table_1 VALUES (1,1), (2,2); - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-------------------+-------------- - table_1 | parallel_access | sequential_access | not_accessed -(1 row) - - ALTER TABLE table_1 ADD COLUMN test_col INT; - -- now see that the other tables are not accessed at all - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-------------------+----------------- - table_1 | parallel_access | sequential_access | parallel_access -(1 row) - -ROLLBACK; --- this test shows that even if two multiple single shard --- commands executed, we can treat the transaction as sequential -BEGIN; - SELECT count(*) FROM table_1 WHERE key = 1; - count -------- - 1 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-------------------+--------------+-------------- - table_1 | sequential_access | not_accessed | not_accessed -(1 row) - - SELECT count(*) FROM table_1 WHERE key = 2; - count -------- - 1 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-------------------+--------------+-------------- - table_1 | sequential_access | not_accessed | not_accessed -(1 row) - - INSERT INTO table_1 VALUES (1,1); - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-------------------+-------------------+-------------- - table_1 | sequential_access | sequential_access | not_accessed -(1 row) - - INSERT INTO table_1 VALUES (2,2); - SELECT * FROM relation_acesses WHERE 
table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-------------------+-------------------+-------------- - table_1 | sequential_access | sequential_access | not_accessed -(1 row) - -ROLLBACK; --- a sample DDL example -BEGIN; - ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+----------------- - table_1 | not_accessed | not_accessed | parallel_access -(1 row) - -ROLLBACK; --- a simple join touches single shard per table -BEGIN; - SELECT - count(*) - FROM - table_1, table_2, table_3, table_4, table_5 - WHERE - table_1.key = table_2.key AND table_2.key = table_3.key AND - table_3.key = table_4.key AND table_4.key = table_5.key AND - table_1.key = 1; - count -------- - 1 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-------------------+--------------+-------------- - table_1 | sequential_access | not_accessed | not_accessed - table_2 | sequential_access | not_accessed | not_accessed - table_3 | sequential_access | not_accessed | not_accessed - table_4 | sequential_access | not_accessed | not_accessed - table_5 | sequential_access | not_accessed | not_accessed - table_6 | not_accessed | not_accessed | not_accessed - table_7 | not_accessed | not_accessed | not_accessed -(7 rows) - -ROLLBACK; --- a simple real-time join touches all shard per table -BEGIN; - SELECT - count(*) - FROM - table_1, table_2 - WHERE - table_1.key = table_2.key; - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+--------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed - table_2 | 
parallel_access | not_accessed | not_accessed -(2 rows) - -ROLLBACK; --- a simple real-time join touches all shard per table --- in sequential mode -BEGIN; - SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; - SELECT - count(*) - FROM - table_1, table_2 - WHERE - table_1.key = table_2.key; - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-------------------+--------------+-------------- - table_1 | sequential_access | not_accessed | not_accessed - table_2 | sequential_access | not_accessed | not_accessed -(2 rows) - -ROLLBACK; --- a simple subquery pushdown that touches all shards -BEGIN; - SELECT - count(*) - FROM - ( - SELECT - random() - FROM - table_1, table_2, table_3, table_4, table_5 - WHERE - table_1.key = table_2.key AND table_2.key = table_3.key AND - table_3.key = table_4.key AND table_4.key = table_5.key - ) as foo; - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+--------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed - table_2 | parallel_access | not_accessed | not_accessed - table_3 | parallel_access | not_accessed | not_accessed - table_4 | parallel_access | not_accessed | not_accessed - table_5 | parallel_access | not_accessed | not_accessed - table_6 | not_accessed | not_accessed | not_accessed - table_7 | not_accessed | not_accessed | not_accessed -(7 rows) - -ROLLBACK; --- simple multi shard update both sequential and parallel modes --- note that in multi shard modify mode we always add select --- access for all the shards accessed. 
But, sequential mode is OK -BEGIN; - UPDATE table_1 SET value = 15; - SELECT * FROM relation_acesses WHERE table_name = 'table_1'; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+-------------- - table_1 | parallel_access | parallel_access | not_accessed -(1 row) - - SET LOCAL citus.multi_shard_modify_mode = 'sequential'; - UPDATE table_2 SET value = 15; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-------------------+-------------------+-------------- - table_1 | parallel_access | parallel_access | not_accessed - table_2 | sequential_access | sequential_access | not_accessed -(2 rows) - -ROLLBACK; --- now UPDATE/DELETE with subselect pushdown -BEGIN; - UPDATE - table_1 SET value = 15 - WHERE key IN (SELECT key FROM table_2 JOIN table_3 USING (key) WHERE table_2.value = 15); - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+-------------- - table_1 | parallel_access | parallel_access | not_accessed - table_2 | parallel_access | not_accessed | not_accessed - table_3 | parallel_access | not_accessed | not_accessed -(3 rows) - -ROLLBACK; --- INSERT .. SELECT pushdown -BEGIN; - INSERT INTO table_2 SELECT * FROM table_1; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed - table_2 | not_accessed | parallel_access | not_accessed -(2 rows) - -ROLLBACK; --- INSERT .. 
SELECT pushdown in sequential mode should be OK -BEGIN; - SET LOCAL citus.multi_shard_modify_mode = 'sequential'; - INSERT INTO table_2 SELECT * FROM table_1; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-------------------+-------------------+-------------- - table_1 | sequential_access | not_accessed | not_accessed - table_2 | not_accessed | sequential_access | not_accessed -(2 rows) - -ROLLBACK; --- coordinator INSERT .. SELECT -BEGIN; - INSERT INTO table_2 SELECT * FROM table_1 OFFSET 0; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed - table_2 | not_accessed | parallel_access | not_accessed -(2 rows) - -ROLLBACK; - --- recursively planned SELECT -BEGIN; - SELECT - count(*) - FROM - ( - SELECT - random() - FROM - table_1, table_2 - WHERE - table_1.key = table_2.key - OFFSET 0 - ) as foo; - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+--------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed - table_2 | parallel_access | not_accessed | not_accessed -(2 rows) - -ROLLBACK; --- recursively planned SELECT and coordinator INSERT .. 
SELECT -BEGIN; - INSERT INTO table_3 (key) - SELECT - * - FROM - ( - SELECT - random() * 1000 - FROM - table_1, table_2 - WHERE - table_1.key = table_2.key - OFFSET 0 - ) as foo; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed - table_2 | parallel_access | not_accessed | not_accessed - table_3 | not_accessed | parallel_access | not_accessed -(3 rows) - -ROLLBACK; --- recursively planned SELECT and coordinator INSERT .. SELECT --- but modifies single shard, marked as sequential operation -BEGIN; - INSERT INTO table_3 (key) - SELECT - * - FROM - ( - SELECT - random() * 1000 - FROM - table_1, table_2 - WHERE - table_1.key = table_2.key - AND table_1.key = 1 - OFFSET 0 - ) as foo; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-------------------+-------------------+-------------- - table_1 | sequential_access | not_accessed | not_accessed - table_2 | sequential_access | not_accessed | not_accessed - table_3 | not_accessed | sequential_access | not_accessed -(3 rows) - -ROLLBACK; --- recursively planned SELECT and recursively planned multi-shard DELETE -BEGIN; - DELETE FROM table_3 where key IN - ( - SELECT - * - FROM - ( - SELECT - table_1.key - FROM - table_1, table_2 - WHERE - table_1.key = table_2.key - OFFSET 0 - ) as foo - ) AND value IN (SELECT key FROM table_4); - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3', 'table_4') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed - table_2 | parallel_access | not_accessed | not_accessed - table_3 | 
parallel_access | parallel_access | not_accessed - table_4 | parallel_access | not_accessed | not_accessed -(4 rows) - -ROLLBACK; --- copy out -BEGIN; - COPY (SELECT * FROM table_1 WHERE key IN (1,2,3) ORDER BY 1) TO stdout; -1 1 -2 2 -3 3 - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+--------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed -(1 row) - -ROLLBACK; --- copy in -BEGIN; - COPY table_1 FROM STDIN WITH CSV; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+-----------------+-------------- - table_1 | not_accessed | parallel_access | not_accessed -(1 row) - -ROLLBACK; --- copy in single shard -BEGIN; - COPY table_1 FROM STDIN WITH CSV; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+-------------------+-------------- - table_1 | not_accessed | sequential_access | not_accessed -(1 row) - -ROLLBACK; --- reference table accesses should always be a sequential -BEGIN; - SELECT count(*) FROM table_6; - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); - table_name | select_access | dml_access | ddl_access -------------+-------------------+--------------+-------------- - table_6 | sequential_access | not_accessed | not_accessed -(1 row) - - UPDATE table_6 SET value = 15; - SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); - table_name | select_access | dml_access | ddl_access -------------+-------------------+-------------------+-------------- - table_6 | sequential_access | sequential_access | not_accessed -(1 row) - - ALTER TABLE table_6 ADD COLUMN x INT; - SELECT * FROM relation_acesses WHERE table_name IN ('table_6'); - table_name 
| select_access | dml_access | ddl_access -------------+-------------------+-------------------+------------------- - table_6 | sequential_access | sequential_access | sequential_access -(1 row) - -ROLLBACK; --- reference table join with a distributed table -BEGIN; - SELECT count(*) FROM table_1 JOIN table_6 USING(key); - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_6', 'table_1') ORDER BY 1,2; - table_name | select_access | dml_access | ddl_access -------------+-----------------+--------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed - table_6 | parallel_access | not_accessed | not_accessed -(2 rows) - -ROLLBACK; --- TRUNCATE should be DDL -BEGIN; - TRUNCATE table_1; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+----------------- - table_1 | not_accessed | not_accessed | parallel_access -(1 row) - -ROLLBACK; --- TRUNCATE can be a sequential DDL -BEGIN; - SET LOCAL citus.multi_shard_modify_mode = 'sequential'; - TRUNCATE table_1; - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+------------------- - table_1 | not_accessed | not_accessed | sequential_access -(1 row) - -ROLLBACK; --- TRUNCATE on a reference table should be sequential -BEGIN; - TRUNCATE table_6; - SELECT * FROM relation_acesses WHERE table_name IN ('table_6') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+------------------- - table_6 | not_accessed | not_accessed | sequential_access -(1 row) - -ROLLBACK; --- creating foreign keys should consider adding the placement accesses for the referenced table -ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key); -BEGIN; - ALTER TABLE table_2 ADD CONSTRAINT 
table_2_u FOREIGN KEY (key) REFERENCES table_1(key); - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+----------------- - table_1 | not_accessed | not_accessed | parallel_access - table_2 | not_accessed | not_accessed | parallel_access -(2 rows) - -ROLLBACK; --- creating foreign keys should consider adding the placement accesses for the referenced table --- in sequential mode as well -BEGIN; - SET LOCAL citus.multi_shard_modify_mode = 'sequential'; - ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+------------------- - table_1 | not_accessed | not_accessed | sequential_access - table_2 | not_accessed | not_accessed | sequential_access -(2 rows) - -ROLLBACK; -CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test(id int, time date) PARTITION ... - ^ -SELECT create_distributed_table('partitioning_test', 'id'); -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test', 'id'); - ^ --- Adding partition tables via CREATE TABLE should have DDL access the partitioned table as well -BEGIN; - CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2009 PARTITION OF partitionin... 
- ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Adding partition tables via ATTACH PARTITION on local tables should have DDL access the partitioned table as well -CREATE TABLE partitioning_test_2009 AS SELECT * FROM partitioning_test; -ERROR: relation "partitioning_test" does not exist -LINE 1: ...ATE TABLE partitioning_test_2009 AS SELECT * FROM partitioni... - ^ -BEGIN; - ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_... - ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- Adding partition tables via ATTACH PARTITION on distributed tables should have DDL access the partitioned table as well -CREATE TABLE partitioning_test_2010 AS SELECT * FROM partitioning_test; -ERROR: relation "partitioning_test" does not exist -LINE 1: ...ATE TABLE partitioning_test_2010 AS SELECT * FROM partitioni... - ^ -SELECT create_distributed_table('partitioning_test_2010', 'id'); -ERROR: relation "partitioning_test_2010" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test_2010', 'i... - ^ -BEGIN; - ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_... 
- ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- reading from partitioned table marks all of its partitions -BEGIN; - SELECT count(*) FROM partitioning_test; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT count(*) FROM partitioning_test; - ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- reading from partitioned table sequentially marks all of its partitions with sequential accesses -BEGIN; - SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; - SELECT count(*) FROM partitioning_test; -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT count(*) FROM partitioning_test; - ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- updating partitioned table marks all of its partitions -BEGIN; - UPDATE partitioning_test SET time = now(); -ERROR: relation "partitioning_test" does not exist -LINE 1: UPDATE partitioning_test SET time = now(); - ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- updating partitioned table sequentially marks all of its partitions with sequential accesses -BEGIN; - SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; - UPDATE partitioning_test SET time = now(); -ERROR: relation "partitioning_test" does not exist -LINE 1: UPDATE partitioning_test SET time = now(); - ^ - 
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- DDLs on partitioned table marks all of its partitions -BEGIN; - ALTER TABLE partitioning_test ADD COLUMN X INT; -ERROR: relation "partitioning_test" does not exist - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- DDLs on partitioned table sequentially marks all of its partitions with sequential accesses -BEGIN; - SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; - ALTER TABLE partitioning_test ADD COLUMN X INT; -ERROR: relation "partitioning_test" does not exist - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- reading from partition table marks its parent -BEGIN; - SELECT count(*) FROM partitioning_test_2009; -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: SELECT count(*) FROM partitioning_test_2009; - ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- rreading from partition table marks its parent with sequential accesses -BEGIN; - SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; - SELECT count(*) FROM partitioning_test_2009; -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: SELECT count(*) FROM partitioning_test_2009; - ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 
'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- updating from partition table marks its parent -BEGIN; - UPDATE partitioning_test_2009 SET time = now(); -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: UPDATE partitioning_test_2009 SET time = now(); - ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- updating from partition table marks its parent sequential accesses -BEGIN; - SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; - UPDATE partitioning_test_2009 SET time = now(); -ERROR: relation "partitioning_test_2009" does not exist -LINE 1: UPDATE partitioning_test_2009 SET time = now(); - ^ - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -COMMIT; --- DDLs on partition table marks its parent -BEGIN; - CREATE INDEX i1000000 ON partitioning_test_2009 (id); -ERROR: relation "partitioning_test_2009" does not exist - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- DDLs on partition table marks its parent in sequential mode -BEGIN; - SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; - CREATE INDEX i1000000 ON partitioning_test_2009 (id); -ERROR: relation "partitioning_test_2009" does not exist - SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; -ERROR: current transaction is aborted, 
commands ignored until end of transaction block -ROLLBACK; --- TRUNCATE CASCADE works fine -ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key); -BEGIN; - TRUNCATE table_1 CASCADE; -NOTICE: truncate cascades to table "table_2" - SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+--------------+----------------- - table_1 | not_accessed | not_accessed | parallel_access - table_2 | not_accessed | not_accessed | parallel_access -(2 rows) - -ROLLBACK; --- CTEs with SELECT only should work fine -BEGIN; - - WITH cte AS (SELECT count(*) FROM table_1) - SELECT * FROM cte; - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+--------------+-------------- - table_1 | parallel_access | not_accessed | not_accessed -(1 row) - -COMMIT; --- CTEs with SELECT only in sequential mode should work fine -BEGIN; - SET LOCAL citus.multi_shard_modify_mode = 'sequential'; - WITH cte AS (SELECT count(*) FROM table_1) - SELECT * FROM cte; - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-------------------+--------------+-------------- - table_1 | sequential_access | not_accessed | not_accessed -(1 row) - -COMMIT; --- modifying CTEs should work fine with multi-row inserts, which are by default in sequential -BEGIN; - - WITH cte_1 AS (INSERT INTO table_1 VALUES (1000,1000), (1001, 1001), (1002, 1002) RETURNING *) - SELECT * FROM cte_1 ORDER BY 1; - key | value -------+------- - 1000 | 1000 - 1001 | 1001 - 1002 | 1002 -(3 rows) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access 
-------------+---------------+-------------------+-------------- - table_1 | not_accessed | sequential_access | not_accessed -(1 row) - -ROLLBACK; --- modifying CTEs should work fine with parallel mode -BEGIN; - - WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) - SELECT count(*) FROM cte_1 ORDER BY 1; - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+-------------- - table_1 | parallel_access | parallel_access | not_accessed -(1 row) - -ROLLBACK; --- modifying CTEs should work fine with sequential mode -BEGIN; - - WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *) - SELECT count(*) FROM cte_1 ORDER BY 1; - count -------- - 101 -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+-----------------+-----------------+-------------- - table_1 | parallel_access | parallel_access | not_accessed -(1 row) - -ROLLBACK; --- create distributed table with data loading --- should mark both parallel dml and parallel ddl -DROP TABLE table_3; -CREATE TABLE table_3 (key int, value int); -INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i; -BEGIN; - SELECT create_distributed_table('table_3', 'key'); -NOTICE: Copying data from local table... 
- create_distributed_table --------------------------- - -(1 row) - - SELECT * FROM relation_acesses WHERE table_name IN ('table_3') ORDER BY 1; - table_name | select_access | dml_access | ddl_access -------------+---------------+-----------------+----------------- - table_3 | not_accessed | parallel_access | parallel_access -(1 row) - -COMMIT; -SET search_path TO 'public'; -DROP SCHEMA access_tracking CASCADE; -NOTICE: drop cascades to 12 other objects -DETAIL: drop cascades to function access_tracking.relation_select_access_mode(oid) -drop cascades to function access_tracking.relation_dml_access_mode(oid) -drop cascades to function access_tracking.relation_ddl_access_mode(oid) -drop cascades to function access_tracking.relation_access_mode_to_text(integer) -drop cascades to view access_tracking.relation_acesses -drop cascades to table access_tracking.table_1 -drop cascades to table access_tracking.table_2 -drop cascades to table access_tracking.table_4 -drop cascades to table access_tracking.table_5 -drop cascades to table access_tracking.table_6 -drop cascades to table access_tracking.table_7 -drop cascades to table access_tracking.table_3 diff --git a/src/test/regress/expected/replicated_partitioned_table_1.out b/src/test/regress/expected/replicated_partitioned_table_1.out deleted file mode 100644 index cc4405995..000000000 --- a/src/test/regress/expected/replicated_partitioned_table_1.out +++ /dev/null @@ -1,358 +0,0 @@ --- --- Distributed Partitioned Table Tests --- -SET citus.next_shard_id TO 1760000; -CREATE SCHEMA partitioned_table_replicated; -SET search_path TO partitioned_table_replicated; -SET citus.shard_count TO 4; -SET citus.shard_replication_factor TO 2; --- print major version number for version-specific tests -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int AS server_version; - server_version ----------------- - 9 -(1 row) - -CREATE TABLE collections ( - key bigint, - ts timestamptz, - collection_id integer, - value 
numeric -) PARTITION BY LIST ( collection_id ); -ERROR: syntax error at or near "PARTITION" -LINE 6: ) PARTITION BY LIST ( collection_id ); - ^ -CREATE TABLE collections_1 - PARTITION OF collections (key, ts, collection_id, value) - FOR VALUES IN ( 1 ); -ERROR: syntax error at or near "PARTITION" -LINE 2: PARTITION OF collections (key, ts, collection_id, value) - ^ -CREATE TABLE collections_2 - PARTITION OF collections (key, ts, collection_id, value) - FOR VALUES IN ( 2 ); -ERROR: syntax error at or near "PARTITION" -LINE 2: PARTITION OF collections (key, ts, collection_id, value) - ^ --- load some data data -INSERT INTO collections (key, ts, collection_id, value) VALUES (1, '2009-01-01', 1, 1); -ERROR: relation "collections" does not exist -LINE 1: INSERT INTO collections (key, ts, collection_id, value) VALU... - ^ -INSERT INTO collections (key, ts, collection_id, value) VALUES (2, '2009-01-01', 1, 2); -ERROR: relation "collections" does not exist -LINE 1: INSERT INTO collections (key, ts, collection_id, value) VALU... - ^ -INSERT INTO collections (key, ts, collection_id, value) VALUES (3, '2009-01-01', 2, 1); -ERROR: relation "collections" does not exist -LINE 1: INSERT INTO collections (key, ts, collection_id, value) VALU... - ^ -INSERT INTO collections (key, ts, collection_id, value) VALUES (4, '2009-01-01', 2, 2); -ERROR: relation "collections" does not exist -LINE 1: INSERT INTO collections (key, ts, collection_id, value) VALU... - ^ --- in the first case, we'll distributed the --- already existing partitioninong hierarcy -SELECT create_distributed_table('collections', 'key'); -ERROR: relation "collections" does not exist -LINE 1: SELECT create_distributed_table('collections', 'key'); - ^ --- now create partition of a already distributed table -CREATE TABLE collections_3 PARTITION OF collections FOR VALUES IN ( 3 ); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE collections_3 PARTITION OF collections FOR VALU... 
- ^ --- now attaching non distributed table to a distributed table -CREATE TABLE collections_4 AS SELECT * FROM collections LIMIT 0; -ERROR: relation "collections" does not exist -LINE 1: CREATE TABLE collections_4 AS SELECT * FROM collections LIMI... - ^ --- load some data -INSERT INTO collections_4 SELECT i, '2009-01-01', 4, i FROM generate_series (0, 10) i; -ERROR: relation "collections_4" does not exist -LINE 1: INSERT INTO collections_4 SELECT i, '2009-01-01', 4, i FROM ... - ^ -ALTER TABLE collections ATTACH PARTITION collections_4 FOR VALUES IN ( 4 ); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE collections ATTACH PARTITION collections_4 FOR V... - ^ --- finally attach a distributed table to a distributed table -CREATE TABLE collections_5 AS SELECT * FROM collections LIMIT 0; -ERROR: relation "collections" does not exist -LINE 1: CREATE TABLE collections_5 AS SELECT * FROM collections LIMI... - ^ -SELECT create_distributed_table('collections_5', 'key'); -ERROR: relation "collections_5" does not exist -LINE 1: SELECT create_distributed_table('collections_5', 'key'); - ^ --- load some data -INSERT INTO collections_5 SELECT i, '2009-01-01', 5, i FROM generate_series (0, 10) i; -ERROR: relation "collections_5" does not exist -LINE 1: INSERT INTO collections_5 SELECT i, '2009-01-01', 5, i FROM ... - ^ -ALTER TABLE collections ATTACH PARTITION collections_5 FOR VALUES IN ( 5 ); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE collections ATTACH PARTITION collections_5 FOR V... 
- ^ --- make sure that we've all the placements -SELECT - logicalrelid, count(*) as placement_count -FROM - pg_dist_shard, pg_dist_shard_placement -WHERE - logicalrelid::text LIKE '%collections%' AND - pg_dist_shard.shardid = pg_dist_shard_placement.shardid -GROUP BY - logicalrelid -ORDER BY - 1,2; - logicalrelid | placement_count ---------------+----------------- -(0 rows) - --- and, make sure that all tables are colocated -SELECT - count(DISTINCT colocationid) -FROM - pg_dist_partition -WHERE - logicalrelid::text LIKE '%collections%'; - count -------- - 0 -(1 row) - --- make sure that any kind of modification is disallowed on partitions --- given that replication factor > 1 -INSERT INTO collections_4 (key, ts, collection_id, value) VALUES (4, '2009-01-01', 2, 2); -ERROR: relation "collections_4" does not exist -LINE 1: INSERT INTO collections_4 (key, ts, collection_id, value) VA... - ^ --- single shard update/delete not allowed -UPDATE collections_1 SET ts = now() WHERE key = 1; -ERROR: relation "collections_1" does not exist -LINE 1: UPDATE collections_1 SET ts = now() WHERE key = 1; - ^ -DELETE FROM collections_1 WHERE ts = now() AND key = 1; -ERROR: relation "collections_1" does not exist -LINE 1: DELETE FROM collections_1 WHERE ts = now() AND key = 1; - ^ --- multi shard update/delete are not allowed -UPDATE collections_1 SET ts = now(); -ERROR: relation "collections_1" does not exist -LINE 1: UPDATE collections_1 SET ts = now(); - ^ -DELETE FROM collections_1 WHERE ts = now(); -ERROR: relation "collections_1" does not exist -LINE 1: DELETE FROM collections_1 WHERE ts = now(); - ^ --- insert..select pushdown -INSERT INTO collections_1 SELECT * FROM collections_1; -ERROR: relation "collections_1" does not exist -LINE 1: INSERT INTO collections_1 SELECT * FROM collections_1; - ^ --- insert..select via coordinator -INSERT INTO collections_1 SELECT * FROM collections_1 OFFSET 0; -ERROR: relation "collections_1" does not exist -LINE 1: INSERT INTO collections_1 
SELECT * FROM collections_1 OFFSET... - ^ --- COPY is not allowed -COPY collections_1 FROM STDIN; -ERROR: relation "collections_1" does not exist -\. -invalid command \. --- DDLs are not allowed -CREATE INDEX index_on_partition ON collections_1(key); -ERROR: relation "collections_1" does not exist --- EXPLAIN with modifications is not allowed as well -UPDATE collections_1 SET ts = now() WHERE key = 1; -ERROR: relation "collections_1" does not exist -LINE 1: UPDATE collections_1 SET ts = now() WHERE key = 1; - ^ --- TRUNCATE is also not allowed -TRUNCATE collections_1; -ERROR: relation "collections_1" does not exist -TRUNCATE collections, collections_1; -ERROR: relation "collections" does not exist --- modifying CTEs are also not allowed -WITH collections_5_cte AS -( - DELETE FROM collections_5 RETURNING * -) -SELECT * FROM collections_5_cte; -ERROR: relation "collections_5" does not exist -LINE 3: DELETE FROM collections_5 RETURNING * - ^ --- foreign key creation is disallowed due to replication factor > 1 -CREATE TABLE fkey_test (key bigint PRIMARY KEY); -SELECT create_distributed_table('fkey_test', 'key'); - create_distributed_table --------------------------- - -(1 row) - -ALTER TABLE - collections_5 -ADD CONSTRAINT - fkey_delete FOREIGN KEY(key) -REFERENCES - fkey_test(key) ON DELETE CASCADE; -ERROR: relation "collections_5" does not exist --- we should be able to attach and detach partitions --- given that those DDLs are on the parent table -CREATE TABLE collections_6 - PARTITION OF collections (key, ts, collection_id, value) - FOR VALUES IN ( 6 ); -ERROR: syntax error at or near "PARTITION" -LINE 2: PARTITION OF collections (key, ts, collection_id, value) - ^ -ALTER TABLE collections DETACH PARTITION collections_6; -ERROR: syntax error at or near "DETACH" -LINE 1: ALTER TABLE collections DETACH PARTITION collections_6; - ^ -ALTER TABLE collections ATTACH PARTITION collections_6 FOR VALUES IN ( 6 ); -ERROR: syntax error at or near "ATTACH" -LINE 1: ALTER TABLE 
collections ATTACH PARTITION collections_6 FOR V... - ^ --- read queries works just fine -SELECT count(*) FROM collections_1 WHERE key = 1; -ERROR: relation "collections_1" does not exist -LINE 1: SELECT count(*) FROM collections_1 WHERE key = 1; - ^ -SELECT count(*) FROM collections_1 WHERE key != 1; -ERROR: relation "collections_1" does not exist -LINE 1: SELECT count(*) FROM collections_1 WHERE key != 1; - ^ --- rollups SELECT'ing from partitions should work just fine -CREATE TABLE collections_agg ( - key bigint, - sum_value numeric -); -SELECT create_distributed_table('collections_agg', 'key'); - create_distributed_table --------------------------- - -(1 row) - --- pushdown roll-up -INSERT INTO collections_agg SELECT key, sum(key) FROM collections_1 GROUP BY key; -ERROR: relation "collections_1" does not exist -LINE 1: ...RT INTO collections_agg SELECT key, sum(key) FROM collection... - ^ --- coordinator roll-up -INSERT INTO collections_agg SELECT collection_id, sum(key) FROM collections_1 GROUP BY collection_id; -ERROR: relation "collections_1" does not exist -LINE 1: ...llections_agg SELECT collection_id, sum(key) FROM collection... - ^ --- now make sure that repair functionality works fine --- create a table and create its distribution metadata -CREATE TABLE customer_engagements (id integer, event_id int) PARTITION BY LIST ( event_id ); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...E customer_engagements (id integer, event_id int) PARTITION ... 
- ^ -CREATE TABLE customer_engagements_1 - PARTITION OF customer_engagements - FOR VALUES IN ( 1 ); -ERROR: syntax error at or near "PARTITION" -LINE 2: PARTITION OF customer_engagements - ^ -CREATE TABLE customer_engagements_2 - PARTITION OF customer_engagements - FOR VALUES IN ( 2 ); -ERROR: syntax error at or near "PARTITION" -LINE 2: PARTITION OF customer_engagements - ^ --- add some indexes -CREATE INDEX ON customer_engagements (id); -ERROR: relation "customer_engagements" does not exist -CREATE INDEX ON customer_engagements (event_id); -ERROR: relation "customer_engagements" does not exist -CREATE INDEX ON customer_engagements (id, event_id); -ERROR: relation "customer_engagements" does not exist --- distribute the table --- create a single shard on the first worker -SET citus.shard_count TO 1; -SET citus.shard_replication_factor TO 2; -SELECT create_distributed_table('customer_engagements', 'id', 'hash'); -ERROR: relation "customer_engagements" does not exist -LINE 1: SELECT create_distributed_table('customer_engagements', 'id'... 
- ^ --- ingest some data for the tests -INSERT INTO customer_engagements VALUES (1, 1); -ERROR: relation "customer_engagements" does not exist -LINE 1: INSERT INTO customer_engagements VALUES (1, 1); - ^ -INSERT INTO customer_engagements VALUES (2, 1); -ERROR: relation "customer_engagements" does not exist -LINE 1: INSERT INTO customer_engagements VALUES (2, 1); - ^ -INSERT INTO customer_engagements VALUES (1, 2); -ERROR: relation "customer_engagements" does not exist -LINE 1: INSERT INTO customer_engagements VALUES (1, 2); - ^ -INSERT INTO customer_engagements VALUES (2, 2); -ERROR: relation "customer_engagements" does not exist -LINE 1: INSERT INTO customer_engagements VALUES (2, 2); - ^ --- the following queries does the following: --- (i) create a new shard --- (ii) mark the second shard placements as unhealthy --- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones --- (iv) do a successful master_copy_shard_placement from the first placement to the second --- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement -SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset -SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport=:worker_1_port \gset --- get the newshardid -SELECT shardid as newshardid FROM pg_dist_shard WHERE logicalrelid = 'customer_engagements'::regclass -\gset -ERROR: relation "customer_engagements" does not exist -LINE 1: ...ewshardid FROM pg_dist_shard WHERE logicalrelid = 'customer_... - ^ --- now, update the second placement as unhealthy -UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid - AND groupid = :worker_2_group; -ERROR: syntax error at or near ":" -LINE 1: ...dist_placement SET shardstate = 3 WHERE shardid = :newshardi... 
- ^ --- cannot repair a shard after a modification (transaction still open during repair) -BEGIN; -INSERT INTO customer_engagements VALUES (1, 1); -ERROR: relation "customer_engagements" does not exist -LINE 1: INSERT INTO customer_engagements VALUES (1, 1); - ^ -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); -ERROR: syntax error at or near ":" -LINE 1: SELECT master_copy_shard_placement(:newshardid, 'localhost',... - ^ -ROLLBACK; --- modifications after reparing a shard are fine (will use new metadata) -BEGIN; -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); -ERROR: syntax error at or near ":" -LINE 1: SELECT master_copy_shard_placement(:newshardid, 'localhost',... - ^ -ALTER TABLE customer_engagements ADD COLUMN value float DEFAULT 1.0; -ERROR: current transaction is aborted, commands ignored until end of transaction block -SELECT * FROM customer_engagements ORDER BY 1,2,3; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; -BEGIN; -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); -ERROR: syntax error at or near ":" -LINE 1: SELECT master_copy_shard_placement(:newshardid, 'localhost',... 
- ^ -INSERT INTO customer_engagements VALUES (1, 1); -ERROR: current transaction is aborted, commands ignored until end of transaction block -SELECT count(*) FROM customer_engagements; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- TRUNCATE is allowed on the parent table --- try it just before dropping the table -TRUNCATE collections; -ERROR: relation "collections" does not exist -SET search_path TO public; -DROP SCHEMA partitioned_table_replicated CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table partitioned_table_replicated.fkey_test -drop cascades to table partitioned_table_replicated.collections_agg diff --git a/src/test/regress/expected/subquery_partitioning_0.out b/src/test/regress/expected/subquery_partitioning_0.out deleted file mode 100644 index 3e3771511..000000000 --- a/src/test/regress/expected/subquery_partitioning_0.out +++ /dev/null @@ -1,246 +0,0 @@ --- =================================================================== --- test recursive planning functionality on partitioned tables --- =================================================================== -CREATE SCHEMA subquery_and_partitioning; -SET search_path TO subquery_and_partitioning, public; -CREATE TABLE users_table_local AS SELECT * FROM users_table; -CREATE TABLE events_table_local AS SELECT * FROM events_table; -CREATE TABLE partitioning_test(id int, value_1 int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...partitioning_test(id int, value_1 int, time date) PARTITION ... - ^ - --- create its partitions -CREATE TABLE partitioning_test_2017 PARTITION OF partitioning_test FOR VALUES FROM ('2017-01-01') TO ('2018-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2017 PARTITION OF partitionin... 
- ^ -CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: CREATE TABLE partitioning_test_2010 PARTITION OF partitionin... - ^ --- load some data and distribute tables -INSERT INTO partitioning_test VALUES (1, 1, '2017-11-23'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES (1, 1, '2017-11-23'); - ^ -INSERT INTO partitioning_test VALUES (2, 1, '2010-07-07'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES (2, 1, '2010-07-07'); - ^ -INSERT INTO partitioning_test_2017 VALUES (3, 3, '2017-11-22'); -ERROR: relation "partitioning_test_2017" does not exist -LINE 1: INSERT INTO partitioning_test_2017 VALUES (3, 3, '2017-11-22... - ^ -INSERT INTO partitioning_test_2010 VALUES (4, 4, '2010-03-03'); -ERROR: relation "partitioning_test_2010" does not exist -LINE 1: INSERT INTO partitioning_test_2010 VALUES (4, 4, '2010-03-03... 
- ^ --- distribute partitioned table -SET citus.shard_replication_factor TO 1; -SELECT create_distributed_table('partitioning_test', 'id'); -ERROR: relation "partitioning_test" does not exist -LINE 1: SELECT create_distributed_table('partitioning_test', 'id'); - ^ -SET client_min_messages TO DEBUG1; --- subplan for partitioned tables -SELECT - id -FROM - (SELECT - DISTINCT partitioning_test.id - FROM - partitioning_test - LIMIT 5 - ) as foo - ORDER BY 1 DESC; -ERROR: relation "partitioning_test" does not exist -LINE 7: partitioning_test - ^ --- final query is router on partitioned tables -SELECT - * -FROM - (SELECT - DISTINCT partitioning_test.id - FROM - partitioning_test - LIMIT 5 - ) as foo, - (SELECT - DISTINCT partitioning_test.time - FROM - partitioning_test - LIMIT 5 - ) as bar - WHERE foo.id = date_part('day', bar.time) - ORDER BY 2 DESC, 1; -ERROR: relation "partitioning_test" does not exist -LINE 7: partitioning_test - ^ --- final query is real-time -SELECT - * -FROM - (SELECT - DISTINCT partitioning_test.time - FROM - partitioning_test - ORDER BY 1 DESC - LIMIT 5 - ) as foo, - ( - SELECT - DISTINCT partitioning_test.id - FROM - partitioning_test - ) as bar - WHERE date_part('day', foo.time) = bar.id - ORDER BY 2 DESC, 1 DESC - LIMIT 3; -ERROR: relation "partitioning_test" does not exist -LINE 7: partitioning_test - ^ --- final query is real-time that is joined with partitioned table -SELECT - * -FROM - (SELECT - DISTINCT partitioning_test.time - FROM - partitioning_test - ORDER BY 1 DESC - LIMIT 5 - ) as foo, - ( - SELECT - DISTINCT partitioning_test.id - FROM - partitioning_test - ) as bar, - partitioning_test - WHERE date_part('day', foo.time) = bar.id AND partitioning_test.id = bar.id - ORDER BY 2 DESC, 1 DESC - LIMIT 3; -ERROR: relation "partitioning_test" does not exist -LINE 7: partitioning_test - ^ --- subquery in WHERE clause -SELECT DISTINCT id -FROM partitioning_test -WHERE - id IN (SELECT DISTINCT date_part('day', time) FROM 
partitioning_test); -ERROR: relation "partitioning_test" does not exist -LINE 2: FROM partitioning_test - ^ --- repartition subquery -SET citus.enable_repartition_joins to ON; -SELECT - count(*) -FROM -( - SELECT DISTINCT p1.value_1 FROM partitioning_test as p1, partitioning_test as p2 WHERE p1.id = p2.value_1 -) as foo, -( - SELECT user_id FROM users_table -) as bar -WHERE foo.value_1 = bar.user_id; -ERROR: relation "partitioning_test" does not exist -LINE 5: SELECT DISTINCT p1.value_1 FROM partitioning_test as p1, pa... - ^ -SET citus.enable_repartition_joins to OFF; --- subquery, cte, view and non-partitioned tables -CREATE VIEW subquery_and_ctes AS -SELECT - * -FROM -( - WITH cte AS ( - WITH local_cte AS ( - SELECT * FROM users_table_local - ), - dist_cte AS ( - SELECT - user_id - FROM - events_table, - (SELECT DISTINCT value_1 FROM partitioning_test OFFSET 0) as foo - WHERE - events_table.user_id = foo.value_1 AND - events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3) - ) - SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id -) -SELECT - count(*) as cnt -FROM - cte, - (SELECT - DISTINCT events_table.user_id - FROM - partitioning_test, events_table - WHERE - events_table.user_id = partitioning_test.id AND - event_type IN (1,2,3,4) - ORDER BY 1 DESC LIMIT 5 - ) as foo - WHERE foo.user_id = cte.user_id -) as foo, users_table WHERE foo.cnt > users_table.value_2; -ERROR: relation "partitioning_test" does not exist -LINE 15: (SELECT DISTINCT value_1 FROM partitioning_test OFFSET 0)... 
- ^ -SELECT * FROM subquery_and_ctes -ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC -LIMIT 5; -ERROR: relation "subquery_and_ctes" does not exist -LINE 1: SELECT * FROM subquery_and_ctes - ^ --- deep subquery, partitioned and non-partitioned tables together -SELECT count(*) -FROM -( - SELECT avg(min) FROM - ( - SELECT min(partitioning_test.value_1) FROM - ( - SELECT avg(event_type) as avg_ev_type FROM - ( - SELECT - max(value_1) as mx_val_1 - FROM ( - SELECT - avg(event_type) as avg - FROM - ( - SELECT - cnt - FROM - (SELECT count(*) as cnt, value_1 FROM partitioning_test GROUP BY value_1) as level_1, users_table - WHERE - users_table.user_id = level_1.cnt - ) as level_2, events_table - WHERE events_table.user_id = level_2.cnt - GROUP BY level_2.cnt - ) as level_3, users_table - WHERE user_id = level_3.avg - GROUP BY level_3.avg - ) as level_4, events_table - WHERE level_4.mx_val_1 = events_table.user_id - GROUP BY level_4.mx_val_1 - ) as level_5, partitioning_test - WHERE - level_5.avg_ev_type = partitioning_test.id - GROUP BY - level_5.avg_ev_type - ) as level_6, users_table WHERE users_table.user_id = level_6.min - GROUP BY users_table.value_1 - ) as bar; -ERROR: relation "partitioning_test" does not exist -LINE 20: (SELECT count(*) as cnt, value_1 FROM partitioning_... 
- ^ -SET client_min_messages TO DEFAULT; -DROP SCHEMA subquery_and_partitioning CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table users_table_local -drop cascades to table events_table_local -SET search_path TO public; diff --git a/src/test/regress/expected/with_partitioning_0.out b/src/test/regress/expected/with_partitioning_0.out deleted file mode 100644 index c7b95adb8..000000000 --- a/src/test/regress/expected/with_partitioning_0.out +++ /dev/null @@ -1,95 +0,0 @@ -CREATE SCHEMA with_partitioning; -SET search_path TO with_partitioning, public; -SET citus.shard_replication_factor TO 1; -CREATE TABLE with_partitioning.local_users_2 (user_id int, event_type int); -INSERT INTO local_users_2 VALUES (0, 0), (1, 4), (1, 7), (2, 1), (3, 3), (5, 4), (6, 2), (10, 7); -CREATE TABLE with_partitioning.partitioning_test(id int, time date) PARTITION BY RANGE (time); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...partitioning.partitioning_test(id int, time date) PARTITION ... - ^ - --- create its partitions -CREATE TABLE with_partitioning.partitioning_test_2017 PARTITION OF partitioning_test FOR VALUES FROM ('2017-01-01') TO ('2018-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...TE TABLE with_partitioning.partitioning_test_2017 PARTITION ... - ^ -CREATE TABLE with_partitioning.partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -ERROR: syntax error at or near "PARTITION" -LINE 1: ...TE TABLE with_partitioning.partitioning_test_2010 PARTITION ... 
- ^ --- load some data and distribute tables -INSERT INTO partitioning_test VALUES (1, '2017-11-23'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES (1, '2017-11-23'); - ^ -INSERT INTO partitioning_test VALUES (2, '2010-07-07'); -ERROR: relation "partitioning_test" does not exist -LINE 1: INSERT INTO partitioning_test VALUES (2, '2010-07-07'); - ^ -INSERT INTO partitioning_test_2017 VALUES (3, '2017-11-22'); -ERROR: relation "partitioning_test_2017" does not exist -LINE 1: INSERT INTO partitioning_test_2017 VALUES (3, '2017-11-22'); - ^ -INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); -ERROR: relation "partitioning_test_2010" does not exist -LINE 1: INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); - ^ --- distribute partitioned table -SELECT create_distributed_table('with_partitioning.partitioning_test', 'id'); -ERROR: relation "with_partitioning.partitioning_test" does not exist -LINE 1: SELECT create_distributed_table('with_partitioning.partition... - ^ --- Join of a CTE on distributed table and then join with a partitioned table -WITH cte AS ( - SELECT * FROM users_table -) -SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time ORDER BY 1, 2 LIMIT 3; -ERROR: relation "partitioning_test" does not exist -LINE 4: ...ELECT DISTINCT ON (id) id, cte.time FROM cte join partitioni... - ^ --- Join of a CTE on distributed table and then join with a partitioned table hitting on only one partition -WITH cte AS ( - SELECT * FROM users_table -) -SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time WHERE partitioning_test.time >'2017-11-20' ORDER BY 1, 2 LIMIT 3; -ERROR: relation "partitioning_test" does not exist -LINE 4: ...ELECT DISTINCT ON (id) id, cte.time FROM cte join partitioni... 
- ^ --- Join with a distributed table and then join of two CTEs -WITH cte AS ( - SELECT id, time FROM partitioning_test -), -cte_2 AS ( - SELECT * FROM partitioning_test WHERE id > 2 -), -cte_joined AS ( - SELECT user_id, cte_2.time FROM users_table join cte_2 on (users_table.time::date = cte_2.time) -), -cte_joined_2 AS ( - SELECT user_id, cte_joined.time FROM cte_joined join cte on (cte_joined.time = cte.time) -) -SELECT DISTINCT ON (event_type) event_type, cte_joined_2.user_id FROM events_table join cte_joined_2 on (cte_joined_2.time=events_table.time::date) ORDER BY 1, 2 LIMIT 10 OFFSET 2; -ERROR: relation "partitioning_test" does not exist -LINE 2: SELECT id, time FROM partitioning_test - ^ --- Join a partitioned table with a local table (both in CTEs) --- and then with a distributed table. After all join with a --- partitioned table again -WITH cte AS ( - SELECT id, time FROM partitioning_test -), -cte_2 AS ( - SELECT * FROM local_users_2 -), -cte_joined AS ( - SELECT user_id, cte.time FROM cte join cte_2 on (cte.id = cte_2.user_id) -), -cte_joined_2 AS ( - SELECT users_table.user_id, cte_joined.time FROM cte_joined join users_table on (cte_joined.time = users_table.time::date) -) -SELECT DISTINCT ON (id) id, cte_joined_2.time FROM cte_joined_2 join partitioning_test on (cte_joined_2.time=partitioning_test.time) ORDER BY 1, 2; -ERROR: relation "partitioning_test" does not exist -LINE 2: SELECT id, time FROM partitioning_test - ^ -DROP SCHEMA with_partitioning CASCADE; -NOTICE: drop cascades to table local_users_2