Drop PG13 Support Phase 2 - Remove PG13 specific paths/tests (#7007)

This commit is the second and last phase of dropping PG13 support.

It consists of the following:

- Removes all PG_VERSION_13 & PG_VERSION_14 version checks from code paths
- Removes pg_version_compat entries and columnar_version_compat entries
specific to PG13 (a sketch of the pattern being deleted follows this list)
- Removes alternative pg13 test outputs
- Removes PG13 normalize lines and fixes the test outputs accordingly
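
The `_compat` shims being deleted were thin macros that papered over signature differences between PG13 and PG14. A minimal sketch of the pattern, modeled on the ExecSimpleRelationInsert call sites in the hunks below (the exact shim bodies in pg_version_compat.h may differ; this is illustrative, not a quote of the removed header):

/*
 * Hypothetical reconstruction of a deleted pg_version_compat.h entry.
 * PG14 added a ResultRelInfo parameter to ExecSimpleRelationInsert,
 * so the PG13 branch dropped the first argument.
 */
#if PG_VERSION_NUM >= PG_VERSION_14
#define ExecSimpleRelationInsert_compat(rri, estate, slot) \
	ExecSimpleRelationInsert(rri, estate, slot)
#else
#define ExecSimpleRelationInsert_compat(rri, estate, slot) \
	ExecSimpleRelationInsert(estate, slot)
#endif

With PG14 as the minimum supported version both branches collapse into a direct call, which is why every *_compat call site in the diffs below is rewritten to the plain PostgreSQL function.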

It is a continuation of 5bf163a27d
pull/6984/head
Naisila Puka 2023-06-21 14:18:23 +03:00 committed by GitHub
parent 1bb667ce6e
commit 69af3e8509
93 changed files with 348 additions and 11137 deletions
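
A large share of the deletions are #if PG_VERSION_NUM guards around the readOnlyTree argument that PG14 added to the utility hook. For orientation, the PG14 hook type from PostgreSQL's tcop/utility.h (shown here for reference; not part of this diff):

typedef void (*ProcessUtility_hook_type) (PlannedStmt *pstmt,
										  const char *queryString,
										  bool readOnlyTree,
										  ProcessUtilityContext context,
										  ParamListInfo params,
										  QueryEnvironment *queryEnv,
										  DestReceiver *dest,
										  QueryCompletion *qc);

Since the parameter now exists unconditionally, hooks such as multi_ProcessUtility and ColumnarProcessUtility below declare it without version guards.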

View File

@@ -159,5 +159,5 @@ MemoryContextTotals(MemoryContext context, MemoryContextCounters *counters)
 		MemoryContextTotals(child, counters);
 	}
 
-	context->methods->stats_compat(context, NULL, NULL, counters, true);
+	context->methods->stats(context, NULL, NULL, counters, true);
 }

View File

@@ -1623,12 +1623,8 @@ StartModifyRelation(Relation rel)
 {
 	EState *estate = create_estate_for_relation(rel);
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 	ResultRelInfo *resultRelInfo = makeNode(ResultRelInfo);
 	InitResultRelInfo(resultRelInfo, rel, 1, NULL, 0);
-#else
-	ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
-#endif
 
 	/* ExecSimpleRelationInsert, ... require caller to open indexes */
 	ExecOpenIndices(resultRelInfo, false);
@@ -1658,7 +1654,7 @@ InsertTupleAndEnforceConstraints(ModifyState *state, Datum *values, bool *nulls)
 	ExecStoreHeapTuple(tuple, slot, false);
 
 	/* use ExecSimpleRelationInsert to enforce constraints */
-	ExecSimpleRelationInsert_compat(state->resultRelInfo, state->estate, slot);
+	ExecSimpleRelationInsert(state->resultRelInfo, state->estate, slot);
 }
@@ -1689,12 +1685,8 @@ FinishModifyRelation(ModifyState *state)
 	ExecCloseIndices(state->resultRelInfo);
 	AfterTriggerEndQuery(state->estate);
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 	ExecCloseResultRelations(state->estate);
 	ExecCloseRangeTableRelations(state->estate);
-#else
-	ExecCleanUpTriggerState(state->estate);
-#endif
 
 	ExecResetTupleTable(state->estate->es_tupleTable, false);
 	FreeExecutorState(state->estate);
@@ -1723,15 +1715,6 @@ create_estate_for_relation(Relation rel)
 	rte->rellockmode = AccessShareLock;
 	ExecInitRangeTable(estate, list_make1(rte));
 
-#if PG_VERSION_NUM < PG_VERSION_14
-	ResultRelInfo *resultRelInfo = makeNode(ResultRelInfo);
-	InitResultRelInfo(resultRelInfo, rel, 1, NULL, 0);
-	estate->es_result_relations = resultRelInfo;
-	estate->es_num_result_relations = 1;
-	estate->es_result_relation_info = resultRelInfo;
-#endif
 	estate->es_output_cid = GetCurrentCommandId(true);
 
 	/* Prepare to catch AFTER triggers. */

View File

@@ -115,9 +115,7 @@ static RangeVar * ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt,
 											List **columnarOptions);
 static void ColumnarProcessUtility(PlannedStmt *pstmt,
 								   const char *queryString,
-#if PG_VERSION_NUM >= PG_VERSION_14
 								   bool readOnlyTree,
-#endif
 								   ProcessUtilityContext context,
 								   ParamListInfo params,
 								   struct QueryEnvironment *queryEnv,
@@ -665,7 +663,6 @@ columnar_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
 }
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 static TransactionId
 columnar_index_delete_tuples(Relation rel,
 							 TM_IndexDeleteOp *delstate)
@@ -714,19 +711,6 @@ columnar_index_delete_tuples(Relation rel,
 }
 
-#else
-static TransactionId
-columnar_compute_xid_horizon_for_tuples(Relation rel,
-										ItemPointerData *tids,
-										int nitems)
-{
-	elog(ERROR, "columnar_compute_xid_horizon_for_tuples not implemented");
-}
-#endif
-
 static void
 columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
 					  int options, BulkInsertState bistate)
@@ -1484,8 +1468,7 @@ columnar_index_build_range_scan(Relation columnarRelation,
 	if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent)
 	{
 		/* ignore lazy VACUUM's */
-		OldestXmin = GetOldestNonRemovableTransactionId_compat(columnarRelation,
-															   PROCARRAY_FLAGS_VACUUM);
+		OldestXmin = GetOldestNonRemovableTransactionId(columnarRelation);
 	}
 
 	Snapshot snapshot = { 0 };
@@ -1813,8 +1796,8 @@ ColumnarReadMissingRowsIntoIndex(TableScanDesc scan, Relation indexRelation,
 	Relation columnarRelation = scan->rs_rd;
 	IndexUniqueCheck indexUniqueCheck =
 		indexInfo->ii_Unique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO;
-	index_insert_compat(indexRelation, indexValues, indexNulls, columnarItemPointer,
-						columnarRelation, indexUniqueCheck, false, indexInfo);
+	index_insert(indexRelation, indexValues, indexNulls, columnarItemPointer,
+				 columnarRelation, indexUniqueCheck, false, indexInfo);
 	validateIndexState->tups_inserted += 1;
 }
@@ -2240,21 +2223,17 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
 static void
 ColumnarProcessUtility(PlannedStmt *pstmt,
 					   const char *queryString,
-#if PG_VERSION_NUM >= PG_VERSION_14
 					   bool readOnlyTree,
-#endif
 					   ProcessUtilityContext context,
 					   ParamListInfo params,
 					   struct QueryEnvironment *queryEnv,
 					   DestReceiver *dest,
 					   QueryCompletion *completionTag)
 {
-#if PG_VERSION_NUM >= PG_VERSION_14
 	if (readOnlyTree)
 	{
 		pstmt = copyObject(pstmt);
 	}
-#endif
 
 	Node *parsetree = pstmt->utilityStmt;
@@ -2371,8 +2350,8 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
 		CheckCitusColumnarAlterExtensionStmt(parsetree);
 	}
 
-	PrevProcessUtilityHook_compat(pstmt, queryString, false, context,
-								  params, queryEnv, dest, completionTag);
+	PrevProcessUtilityHook(pstmt, queryString, false, context,
+						   params, queryEnv, dest, completionTag);
 
 	if (columnarOptions != NIL)
 	{
@@ -2500,11 +2479,7 @@ static const TableAmRoutine columnar_am_methods = {
 	.tuple_get_latest_tid = columnar_get_latest_tid,
 	.tuple_tid_valid = columnar_tuple_tid_valid,
 	.tuple_satisfies_snapshot = columnar_tuple_satisfies_snapshot,
-#if PG_VERSION_NUM >= PG_VERSION_14
 	.index_delete_tuples = columnar_index_delete_tuples,
-#else
-	.compute_xid_horizon_for_tuples = columnar_compute_xid_horizon_for_tuples,
-#endif
 
 	.tuple_insert = columnar_tuple_insert,
 	.tuple_insert_speculative = columnar_tuple_insert_speculative,

View File

@@ -81,13 +81,6 @@ CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig)
 {
 	Assert((sig == SIGINT) || (sig == SIGTERM));
 
-#if PG_VERSION_NUM < PG_VERSION_14
-	if (timeout != 0)
-	{
-		elog(ERROR, "timeout parameter is only supported on Postgres 14 or later");
-	}
-#endif
-
 	bool missingOk = false;
 	int nodeId = ExtractNodeIdFromGlobalPID(globalPID, missingOk);
 	int processId = ExtractProcessIdFromGlobalPID(globalPID);
@@ -102,14 +95,9 @@ CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig)
 	}
 	else
 	{
-#if PG_VERSION_NUM >= PG_VERSION_14
 		appendStringInfo(cancelQuery,
 						 "SELECT pg_terminate_backend(%d::integer, %lu::bigint)",
 						 processId, timeout);
-#else
-		appendStringInfo(cancelQuery, "SELECT pg_terminate_backend(%d::integer)",
-						 processId);
-#endif
 	}
 
 	int connectionFlags = 0;

View File

@@ -114,13 +114,6 @@ PreprocessClusterStmt(Node *node, const char *clusterCommand,
 static bool
 IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt)
 {
-#if PG_VERSION_NUM < PG_VERSION_14
-	if (clusterStmt->options & CLUOPT_VERBOSE)
-	{
-		return true;
-	}
-	return false;
-#else
 	DefElem *opt = NULL;
 	foreach_ptr(opt, clusterStmt->params)
 	{
@@ -130,5 +123,4 @@ IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt)
 		}
 	}
 	return false;
-#endif
 }

View File

@@ -214,13 +214,7 @@ DeferErrorIfCircularDependencyExists(const ObjectAddress *objectAddress)
 			dependency->objectId == objectAddress->objectId &&
 			dependency->objectSubId == objectAddress->objectSubId)
 		{
-			char *objectDescription = NULL;
-
-#if PG_VERSION_NUM >= PG_VERSION_14
-			objectDescription = getObjectDescription(objectAddress, false);
-#else
-			objectDescription = getObjectDescription(objectAddress);
-#endif
+			char *objectDescription = getObjectDescription(objectAddress, false);
 
 			StringInfo detailInfo = makeStringInfo();
 			appendStringInfo(detailInfo, "\"%s\" circularly depends itself, resolve "
@@ -529,9 +523,9 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
 			 */
 			Assert(false);
 			ereport(ERROR, (errmsg("unsupported object %s for distribution by citus",
-								   getObjectTypeDescription_compat(dependency,
-																   /* missingOk: */ false)),
+								   getObjectTypeDescription(dependency,
+															/* missingOk: */ false)),
 							errdetail(
 								"citus tries to recreate an unsupported object on its workers"),
 							errhint("please report a bug as this should not be happening")));

View File

@@ -1531,7 +1531,7 @@ GetDistributeObjectOps(Node *node)
 		case T_AlterTableStmt:
 		{
 			AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-			switch (AlterTableStmtObjType_compat(stmt))
+			switch (stmt->objtype)
 			{
 				case OBJECT_TYPE:
 				{

View File

@@ -206,11 +206,7 @@ MakeCollateClauseFromOid(Oid collationOid)
 	List *objName = NIL;
 	List *objArgs = NIL;
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 	getObjectIdentityParts(&collateAddress, &objName, &objArgs, false);
-#else
-	getObjectIdentityParts(&collateAddress, &objName, &objArgs);
-#endif
 
 	char *name = NULL;
 	foreach_ptr(name, objName)

View File

@@ -1641,7 +1641,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString,
 	 * workers
 	 */
 	const char *functionName =
-		getObjectIdentity_compat(address, /* missingOk: */ false);
+		getObjectIdentity(address, /* missingOk: */ false);
 	ereport(ERROR, (errmsg("distrtibuted functions are not allowed to depend on an "
 						   "extension"),
 					errdetail("Function \"%s\" is already distributed. Functions from "
@@ -1811,8 +1811,8 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address)
 		List *newProcName = list_make2(namespace, makeString(newName));
 
 		/* don't need to rename if the input arguments don't match */
-		FuncCandidateList clist = FuncnameGetCandidates_compat(newProcName, numargs, NIL,
-															   false, false, false, true);
+		FuncCandidateList clist = FuncnameGetCandidates(newProcName, numargs, NIL,
+														false, false, false, true);
 		for (; clist; clist = clist->next)
 		{
 			if (memcmp(clist->args, argtypes, sizeof(Oid) * numargs) == 0)

View File

@@ -216,10 +216,10 @@ DoLocalCopy(StringInfo buffer, Oid relationId, int64 shardId, CopyStmt *copyStat
 	ParseState *pState = make_parsestate(NULL);
 	(void) addRangeTableEntryForRelation(pState, shard, AccessShareLock,
 										 NULL, false, false);
-	CopyFromState cstate = BeginCopyFrom_compat(pState, shard, NULL, NULL, false,
-												ReadFromLocalBufferCallback,
-												copyStatement->attlist,
-												copyStatement->options);
+	CopyFromState cstate = BeginCopyFrom(pState, shard, NULL, NULL, false,
+										 ReadFromLocalBufferCallback,
+										 copyStatement->attlist,
+										 copyStatement->options);
 	CopyFrom(cstate);
 	EndCopyFrom(cstate);

View File

@@ -258,9 +258,6 @@ static CopyCoercionData * ColumnCoercionPaths(TupleDesc destTupleDescriptor,
 											  Oid *finalColumnTypeArray);
 static FmgrInfo * TypeOutputFunctions(uint32 columnCount, Oid *typeIdArray,
 									  bool binaryFormat);
-#if PG_VERSION_NUM < PG_VERSION_14
-static List * CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist);
-#endif
 static bool CopyStatementHasFormat(CopyStmt *copyStatement, char *formatName);
 static void CitusCopyFrom(CopyStmt *copyStatement, QueryCompletion *completionTag);
 static void EnsureCopyCanRunOnRelation(Oid relationId);
@@ -609,14 +606,14 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag)
 	}
 
 	/* initialize copy state to read from COPY data source */
-	CopyFromState copyState = BeginCopyFrom_compat(NULL,
-												   copiedDistributedRelation,
-												   NULL,
-												   copyStatement->filename,
-												   copyStatement->is_program,
-												   NULL,
-												   copyStatement->attlist,
-												   copyStatement->options);
+	CopyFromState copyState = BeginCopyFrom(NULL,
+											copiedDistributedRelation,
+											NULL,
+											copyStatement->filename,
+											copyStatement->is_program,
+											NULL,
+											copyStatement->attlist,
+											copyStatement->options);
 
 	/* set up callback to identify error line number */
 	errorCallback.callback = CopyFromErrorCallback;
@@ -648,9 +645,7 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag)
 
 		++processedRowCount;
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 		pgstat_progress_update_param(PROGRESS_COPY_TUPLES_PROCESSED, processedRowCount);
-#endif
 	}
 
 	EndCopyFrom(copyState);
@@ -890,28 +885,8 @@ CanUseBinaryCopyFormatForType(Oid typeId)
 	HeapTuple typeTup = typeidType(typeId);
 	Form_pg_type type = (Form_pg_type) GETSTRUCT(typeTup);
 	Oid elementType = type->typelem;
-#if PG_VERSION_NUM < PG_VERSION_14
-	char typeCategory = type->typcategory;
-#endif
 	ReleaseSysCache(typeTup);
 
-#if PG_VERSION_NUM < PG_VERSION_14
-
-	/*
-	 * In PG versions before PG14 the array_recv function would error out more
-	 * than necessary.
-	 *
-	 * It errors out when the element type its oids don't match with the oid in
-	 * the received data. This happens pretty much always for non built in
-	 * types, because their oids differ between postgres intallations. So we
-	 * skip binary encoding when the element type is a non built in type.
-	 */
-	if (typeCategory == TYPCATEGORY_ARRAY && elementType >= FirstNormalObjectId)
-	{
-		return false;
-	}
-#endif
-
 	/*
 	 * Any type that is a wrapper around an element type (e.g. arrays and
 	 * ranges) require the element type to also has support for binary
@@ -1682,20 +1657,6 @@ AppendCopyBinaryFooters(CopyOutState footerOutputState)
 static void
 SendCopyBegin(CopyOutState cstate)
 {
-#if PG_VERSION_NUM < PG_VERSION_14
-	if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3) {
-		/* old way */
-		if (cstate->binary)
-			ereport(ERROR,
-					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg("COPY BINARY is not supported to stdout or from stdin")));
-		pq_putemptymessage('H');
-		/* grottiness needed for old COPY OUT protocol */
-		pq_startcopyout();
-		cstate->copy_dest = COPY_OLD_FE;
-		return;
-	}
-#endif
 	StringInfoData buf;
 	int natts = list_length(cstate->attnumlist);
 	int16 format = (cstate->binary ? 1 : 0);
@@ -1715,16 +1676,6 @@ SendCopyBegin(CopyOutState cstate)
 static void
 SendCopyEnd(CopyOutState cstate)
 {
-#if PG_VERSION_NUM < PG_VERSION_14
-	if (cstate->copy_dest != COPY_NEW_FE)
-	{
-		CopySendData(cstate, "\\.", 2);
-		/* Need to flush out the trailer (this also appends a newline) */
-		CopySendEndOfRow(cstate, true);
-		pq_endcopyout(false);
-		return;
-	}
-#endif
 	/* Shouldn't have any unsent data */
 	Assert(cstate->fe_msgbuf->len == 0);
 
 	/* Send Copy Done message */
@@ -1782,21 +1733,6 @@ CopySendEndOfRow(CopyOutState cstate, bool includeEndOfLine)
 	switch (cstate->copy_dest)
 	{
-#if PG_VERSION_NUM < PG_VERSION_14
-		case COPY_OLD_FE:
-			/* The FE/BE protocol uses \n as newline for all platforms */
-			if (!cstate->binary && includeEndOfLine)
-				CopySendChar(cstate, '\n');
-
-			if (pq_putbytes(fe_msgbuf->data, fe_msgbuf->len))
-			{
-				/* no hope of recovering connection sync, so FATAL */
-				ereport(FATAL,
-						(errcode(ERRCODE_CONNECTION_FAILURE),
-						 errmsg("connection lost during COPY to stdout")));
-			}
-			break;
-#endif
 		case COPY_FRONTEND:
 			/* The FE/BE protocol uses \n as newline for all platforms */
 			if (!cstate->binary && includeEndOfLine)
@@ -3256,92 +3192,6 @@ CreateRangeTable(Relation rel, AclMode requiredAccess)
 }
 
-#if PG_VERSION_NUM < PG_VERSION_14
-
-/* Helper for CheckCopyPermissions(), copied from postgres */
-static List *
-CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist)
-{
-	/* *INDENT-OFF* */
-	List *attnums = NIL;
-
-	if (attnamelist == NIL)
-	{
-		/* Generate default column list */
-		int attr_count = tupDesc->natts;
-		int i;
-
-		for (i = 0; i < attr_count; i++)
-		{
-			if (TupleDescAttr(tupDesc, i)->attisdropped)
-				continue;
-			if (TupleDescAttr(tupDesc, i)->attgenerated)
-				continue;
-			attnums = lappend_int(attnums, i + 1);
-		}
-	}
-	else
-	{
-		/* Validate the user-supplied list and extract attnums */
-		ListCell *l;
-
-		foreach(l, attnamelist)
-		{
-			char *name = strVal(lfirst(l));
-			int attnum;
-			int i;
-
-			/* Lookup column name */
-			attnum = InvalidAttrNumber;
-			for (i = 0; i < tupDesc->natts; i++)
-			{
-				Form_pg_attribute att = TupleDescAttr(tupDesc, i);
-
-				if (att->attisdropped)
-					continue;
-				if (namestrcmp(&(att->attname), name) == 0)
-				{
-					if (att->attgenerated)
-						ereport(ERROR,
-								(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
-								 errmsg("column \"%s\" is a generated column",
-										name),
-								 errdetail("Generated columns cannot be used in COPY.")));
-					attnum = att->attnum;
-					break;
-				}
-			}
-			if (attnum == InvalidAttrNumber)
-			{
-				if (rel != NULL)
-					ereport(ERROR,
-							(errcode(ERRCODE_UNDEFINED_COLUMN),
-							 errmsg("column \"%s\" of relation \"%s\" does not exist",
-									name, RelationGetRelationName(rel))));
-				else
-					ereport(ERROR,
-							(errcode(ERRCODE_UNDEFINED_COLUMN),
-							 errmsg("column \"%s\" does not exist",
-									name)));
-			}
-			/* Check for duplicates */
-			if (list_member_int(attnums, attnum))
-				ereport(ERROR,
-						(errcode(ERRCODE_DUPLICATE_COLUMN),
-						 errmsg("column \"%s\" specified more than once",
-								name)));
-			attnums = lappend_int(attnums, attnum);
-		}
-	}
-
-	return attnums;
-	/* *INDENT-ON* */
-}
-#endif
-
 
 /*
  * CreateConnectionStateHash constructs a hash table which maps from socket
  * number to CopyConnectionState, passing the provided MemoryContext to

View File

@@ -668,7 +668,7 @@ PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString,
 								 ProcessUtilityContext processUtilityContext)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false,
 																false);
@@ -701,7 +701,7 @@ List *
 AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	RangeVar *sequence = stmt->relation;
 	Oid seqOid = RangeVarGetRelid(sequence, NoLock, missing_ok);
@@ -721,7 +721,7 @@ List *
 PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false,
 																true);
@@ -755,7 +755,7 @@ PreprocessAlterSequencePersistenceStmt(Node *node, const char *queryString,
 									   ProcessUtilityContext processUtilityContext)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false,
 																false);
@@ -788,7 +788,7 @@ List *
 AlterSequencePersistenceStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	RangeVar *sequence = stmt->relation;
 	Oid seqOid = RangeVarGetRelid(sequence, NoLock, missing_ok);
@@ -811,7 +811,7 @@ PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
 								 ProcessUtilityContext processUtilityContext)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	ListCell *cmdCell = NULL;
 	foreach(cmdCell, stmt->cmds)

View File

@@ -1135,7 +1135,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
 	if (relKind == RELKIND_SEQUENCE)
 	{
 		AlterTableStmt *stmtCopy = copyObject(alterTableStatement);
-		AlterTableStmtObjType_compat(stmtCopy) = OBJECT_SEQUENCE;
+		stmtCopy->objtype = OBJECT_SEQUENCE;
 #if (PG_VERSION_NUM >= PG_VERSION_15)
 
 		/*
@@ -1165,7 +1165,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
 		 * passes through an AlterTableStmt
 		 */
 		AlterTableStmt *stmtCopy = copyObject(alterTableStatement);
-		AlterTableStmtObjType_compat(stmtCopy) = OBJECT_VIEW;
+		stmtCopy->objtype = OBJECT_VIEW;
 		return PreprocessAlterViewStmt((Node *) stmtCopy, alterTableCommand,
 									   processUtilityContext);
 	}
@@ -2521,13 +2521,13 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
 	char relKind = get_rel_relkind(relationId);
 	if (relKind == RELKIND_SEQUENCE)
 	{
-		AlterTableStmtObjType_compat(alterTableStatement) = OBJECT_SEQUENCE;
+		alterTableStatement->objtype = OBJECT_SEQUENCE;
 		PostprocessAlterSequenceOwnerStmt((Node *) alterTableStatement, NULL);
 		return;
 	}
 	else if (relKind == RELKIND_VIEW)
 	{
-		AlterTableStmtObjType_compat(alterTableStatement) = OBJECT_VIEW;
+		alterTableStatement->objtype = OBJECT_VIEW;
 		PostprocessAlterViewStmt((Node *) alterTableStatement, NULL);
 		return;
 	}
@@ -3517,7 +3517,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 				break;
 			}
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 			case AT_DetachPartitionFinalize:
 			{
 				ereport(ERROR, (errmsg("ALTER TABLE .. DETACH PARTITION .. FINALIZE "
@@ -3525,7 +3524,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 				break;
 			}
 
-#endif
 			case AT_DetachPartition:
 			{
 				/* we only allow partitioning commands if they are only subcommand */
@@ -3537,7 +3535,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 									errhint("You can issue each subcommand "
 											"separately.")));
 				}
-#if PG_VERSION_NUM >= PG_VERSION_14
+
 				PartitionCmd *partitionCommand = (PartitionCmd *) command->def;
 
 				if (partitionCommand->concurrent)
@@ -3546,7 +3544,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 										   "CONCURRENTLY commands are currently "
 										   "unsupported.")));
 				}
-#endif
 
 				break;
 			}
@@ -3589,20 +3586,18 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 			case AT_NoForceRowSecurity:
 			case AT_ValidateConstraint:
 			case AT_DropConstraint: /* we do the check for invalidation in AlterTableDropsForeignKey */
-#if PG_VERSION_NUM >= PG_VERSION_14
 			case AT_SetCompression:
-#endif
 			{
 				/*
 				 * We will not perform any special check for:
 				 * ALTER TABLE .. SET ACCESS METHOD ..
 				 * ALTER TABLE .. ALTER COLUMN .. SET NOT NULL
 				 * ALTER TABLE .. REPLICA IDENTITY ..
 				 * ALTER TABLE .. VALIDATE CONSTRAINT ..
				 * ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION ..
 				 */
 				break;
 			}
 
 			case AT_SetRelOptions:  /* SET (...) */
 			case AT_ResetRelOptions:    /* RESET (...) */

View File

@@ -350,7 +350,7 @@ List *
 AlterTypeStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE);
+	Assert(stmt->objtype == OBJECT_TYPE);
 
 	TypeName *typeName = MakeTypeNameFromRangeVar(stmt->relation);
 	Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
@@ -549,7 +549,7 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress)
 	const char *username = GetUserNameFromId(GetTypeOwner(typeAddress->objectId), false);
 	initStringInfo(&buf);
 	appendStringInfo(&buf, ALTER_TYPE_OWNER_COMMAND,
-					 getObjectIdentity_compat(typeAddress, false),
+					 getObjectIdentity(typeAddress, false),
 					 quote_identifier(username));
 	ddlCommands = lappend(ddlCommands, buf.data);

View File

@@ -33,9 +33,6 @@
 #include "access/attnum.h"
 #include "access/heapam.h"
 #include "access/htup_details.h"
-#if PG_VERSION_NUM < 140000
-#include "access/xact.h"
-#endif
 #include "catalog/catalog.h"
 #include "catalog/dependency.h"
 #include "citus_version.h"
@@ -60,9 +57,6 @@
 #include "distributed/maintenanced.h"
 #include "distributed/multi_logical_replication.h"
 #include "distributed/multi_partitioning_utils.h"
-#if PG_VERSION_NUM < 140000
-#include "distributed/metadata_cache.h"
-#endif
 #include "distributed/metadata_sync.h"
 #include "distributed/metadata/distobject.h"
 #include "distributed/multi_executor.h"
@@ -107,9 +101,7 @@ static void ProcessUtilityInternal(PlannedStmt *pstmt,
 								   struct QueryEnvironment *queryEnv,
 								   DestReceiver *dest,
 								   QueryCompletion *completionTag);
-#if PG_VERSION_NUM >= 140000
 static void set_indexsafe_procflags(void);
-#endif
 static char * CurrentSearchPath(void);
 static void IncrementUtilityHookCountersIfNecessary(Node *parsetree);
 static void PostStandardProcessUtility(Node *parsetree);
@@ -131,8 +123,8 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte
 	plannedStmt->commandType = CMD_UTILITY;
 	plannedStmt->utilityStmt = node;
 
-	ProcessUtility_compat(plannedStmt, queryString, false, context, params, NULL, dest,
-						  completionTag);
+	ProcessUtility(plannedStmt, queryString, false, context, params, NULL, dest,
+				   completionTag);
 }
@@ -148,25 +140,19 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte
 void
 multi_ProcessUtility(PlannedStmt *pstmt,
 					 const char *queryString,
-#if PG_VERSION_NUM >= PG_VERSION_14
 					 bool readOnlyTree,
-#endif
 					 ProcessUtilityContext context,
 					 ParamListInfo params,
 					 struct QueryEnvironment *queryEnv,
 					 DestReceiver *dest,
 					 QueryCompletion *completionTag)
 {
-	Node *parsetree;
-
-#if PG_VERSION_NUM >= PG_VERSION_14
 	if (readOnlyTree)
 	{
 		pstmt = copyObject(pstmt);
 	}
-#endif
 
-	parsetree = pstmt->utilityStmt;
+	Node *parsetree = pstmt->utilityStmt;
 
 	if (IsA(parsetree, TransactionStmt))
 	{
@@ -199,8 +185,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 	 * that state. Since we never need to intercept transaction statements,
 	 * skip our checks and immediately fall into standard_ProcessUtility.
 	 */
-	PrevProcessUtility_compat(pstmt, queryString, false, context,
-							  params, queryEnv, dest, completionTag);
+	PrevProcessUtility(pstmt, queryString, false, context,
+					   params, queryEnv, dest, completionTag);
 
 	return;
 }
@@ -244,8 +230,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 	 * Ensure that utility commands do not behave any differently until CREATE
 	 * EXTENSION is invoked.
 	 */
-	PrevProcessUtility_compat(pstmt, queryString, false, context,
-							  params, queryEnv, dest, completionTag);
+	PrevProcessUtility(pstmt, queryString, false, context,
+					   params, queryEnv, dest, completionTag);
 
 	return;
 }
@@ -276,8 +262,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 	PG_TRY();
 	{
-		PrevProcessUtility_compat(pstmt, queryString, false, context,
-								  params, queryEnv, dest, completionTag);
+		PrevProcessUtility(pstmt, queryString, false, context,
+						   params, queryEnv, dest, completionTag);
 
 		StoredProcedureLevel -= 1;
@@ -310,8 +296,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 	PG_TRY();
 	{
-		PrevProcessUtility_compat(pstmt, queryString, false, context,
-								  params, queryEnv, dest, completionTag);
+		PrevProcessUtility(pstmt, queryString, false, context,
+						   params, queryEnv, dest, completionTag);
 
 		DoBlockLevel -= 1;
 	}
@@ -649,8 +635,8 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 	if (IsA(parsetree, AlterTableStmt))
 	{
 		AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree;
-		if (AlterTableStmtObjType_compat(alterTableStmt) == OBJECT_TABLE ||
-			AlterTableStmtObjType_compat(alterTableStmt) == OBJECT_FOREIGN_TABLE)
+		if (alterTableStmt->objtype == OBJECT_TABLE ||
+			alterTableStmt->objtype == OBJECT_FOREIGN_TABLE)
 		{
 			ErrorIfAlterDropsPartitionColumn(alterTableStmt);
@@ -769,8 +755,8 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 		PreprocessAlterExtensionCitusStmtForCitusColumnar(parsetree);
 	}
 
-	PrevProcessUtility_compat(pstmt, queryString, false, context,
-							  params, queryEnv, dest, completionTag);
+	PrevProcessUtility(pstmt, queryString, false, context,
+					   params, queryEnv, dest, completionTag);
 
 	if (isAlterExtensionUpdateCitusStmt)
 	{
@@ -1208,38 +1194,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 	 */
 	if (ddlJob->startNewTransaction)
 	{
-#if PG_VERSION_NUM < 140000
-
-		/*
-		 * Older versions of postgres doesn't have PROC_IN_SAFE_IC flag
-		 * so we cannot use set_indexsafe_procflags in those versions.
-		 *
-		 * For this reason, we do our best to ensure not grabbing any
-		 * snapshots later in the executor.
-		 */
-
-		/*
-		 * If cache is not populated, system catalog lookups will cause
-		 * the xmin of current backend to change. Then the last phase
-		 * of CREATE INDEX CONCURRENTLY, which is in a separate backend,
-		 * will hang waiting for our backend and result in a deadlock.
-		 *
-		 * We populate the cache before starting the next transaction to
-		 * avoid this. Most of the metadata has already been resolved in
-		 * planning phase, we only need to lookup metadata needed for
-		 * connection establishment.
-		 */
-		(void) CurrentDatabaseName();
-
-		/*
-		 * ConnParams (AuthInfo and PoolInfo) gets a snapshot, which
-		 * will blocks the remote connections to localhost. Hence we warm up
-		 * the cache here so that after we start a new transaction, the entries
-		 * will already be in the hash table, hence we won't be holding any snapshots.
-		 */
-		WarmUpConnParamsHash();
-#endif
-
 		/*
 		 * Since it is not certain whether the code-path that we followed
 		 * until reaching here caused grabbing any snapshots or not, we
@@ -1258,8 +1212,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 		CommitTransactionCommand();
 		StartTransactionCommand();
 
-#if PG_VERSION_NUM >= 140000
-
 		/*
 		 * Tell other backends to ignore us, even if we grab any
 		 * snapshots via adaptive executor.
@@ -1274,7 +1226,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 		 * given above.
 		 */
 		Assert(localExecutionSupported == false);
-#endif
 	}
 
 	MemoryContext savedContext = CurrentMemoryContext;
@@ -1340,8 +1291,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 }
 
-#if PG_VERSION_NUM >= 140000
-
 /*
  * set_indexsafe_procflags sets PROC_IN_SAFE_IC flag in MyProc->statusFlags.
  *
@@ -1364,9 +1313,6 @@ set_indexsafe_procflags(void)
 }
 
-#endif
-
 
 /*
  * CurrentSearchPath is a C interface for calling current_schemas(bool) that
 * PostgreSQL exports.

View File

@@ -359,12 +359,12 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
 	{
 		appendStringInfoString(vacuumPrefix, "SKIP_LOCKED,");
 	}
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 	if (vacuumFlags & VACOPT_PROCESS_TOAST)
 	{
 		appendStringInfoString(vacuumPrefix, "PROCESS_TOAST,");
 	}
-#endif
 
 	if (vacuumParams.truncate != VACOPTVALUE_UNSPECIFIED)
 	{
 		appendStringInfoString(vacuumPrefix,
@@ -389,13 +389,11 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
 			break;
 		}
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 		case VACOPTVALUE_AUTO:
 		{
 			appendStringInfoString(vacuumPrefix, "INDEX_CLEANUP auto,");
 			break;
 		}
-#endif
 
 		default:
 		{
@@ -501,9 +499,7 @@ VacuumStmtParams(VacuumStmt *vacstmt)
 	bool freeze = false;
 	bool full = false;
 	bool disable_page_skipping = false;
-#if PG_VERSION_NUM >= PG_VERSION_14
 	bool process_toast = false;
-#endif
 
 	/* Set default value */
 	params.index_cleanup = VACOPTVALUE_UNSPECIFIED;
@@ -547,16 +543,12 @@ VacuumStmtParams(VacuumStmt *vacstmt)
 		{
 			disable_page_skipping = defGetBoolean(opt);
 		}
-#if PG_VERSION_NUM >= PG_VERSION_14
 		else if (strcmp(opt->defname, "process_toast") == 0)
 		{
 			process_toast = defGetBoolean(opt);
 		}
-#endif
 		else if (strcmp(opt->defname, "index_cleanup") == 0)
 		{
-#if PG_VERSION_NUM >= PG_VERSION_14
 			/* Interpret no string as the default, which is 'auto' */
 			if (!opt->arg)
 			{
@@ -577,10 +569,6 @@ VacuumStmtParams(VacuumStmt *vacstmt)
 											 VACOPTVALUE_DISABLED;
 				}
 			}
-#else
-			params.index_cleanup = defGetBoolean(opt) ? VACOPTVALUE_ENABLED :
-									VACOPTVALUE_DISABLED;
-#endif
 		}
 		else if (strcmp(opt->defname, "truncate") == 0)
 		{
@@ -625,9 +613,7 @@ VacuumStmtParams(VacuumStmt *vacstmt)
 				   (analyze ? VACOPT_ANALYZE : 0) |
 				   (freeze ? VACOPT_FREEZE : 0) |
 				   (full ? VACOPT_FULL : 0) |
-#if PG_VERSION_NUM >= PG_VERSION_14
 				   (process_toast ? VACOPT_PROCESS_TOAST : 0) |
-#endif
 				   (disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0);
 	return params;
 }

View File

@@ -598,7 +598,7 @@ List *
 PostprocessAlterViewStmt(Node *node, const char *queryString)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_VIEW);
+	Assert(stmt->objtype == OBJECT_VIEW);
 
 	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true, true);

View File

@@ -1314,33 +1314,6 @@ StartConnectionEstablishment(MultiConnection *connection, ConnectionHashKey *key
 }
 
-#if PG_VERSION_NUM < 140000
-
-/*
- * WarmUpConnParamsHash warms up the ConnParamsHash by loading all the
- * conn params for active primary nodes.
- */
-void
-WarmUpConnParamsHash(void)
-{
-	List *workerNodeList = ActivePrimaryNodeList(AccessShareLock);
-	WorkerNode *workerNode = NULL;
-	foreach_ptr(workerNode, workerNodeList)
-	{
-		ConnectionHashKey key;
-		strlcpy(key.hostname, workerNode->workerName, MAX_NODE_LENGTH);
-		key.port = workerNode->workerPort;
-		strlcpy(key.database, CurrentDatabaseName(), NAMEDATALEN);
-		strlcpy(key.user, CurrentUserName(), NAMEDATALEN);
-		key.replicationConnParam = false;
-		FindOrCreateConnParamsEntry(&key);
-	}
-}
-#endif
-
 
 /*
  * FindOrCreateConnParamsEntry searches ConnParamsHash for the given key,
  * if it is not found, it is created.

View File

@@ -22,9 +22,7 @@
 #include "access/skey.h"
 #include "access/stratnum.h"
 #include "access/sysattr.h"
-#if PG_VERSION_NUM >= PG_VERSION_14
 #include "access/toast_compression.h"
-#endif
 #include "access/tupdesc.h"
 #include "catalog/dependency.h"
 #include "catalog/indexing.h"
@@ -386,13 +384,11 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
 											atttypmod);
 		appendStringInfoString(&buffer, attributeTypeName);
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 		if (CompressionMethodIsValid(attributeForm->attcompression))
 		{
 			appendStringInfo(&buffer, " COMPRESSION %s",
 							 GetCompressionMethodName(attributeForm->attcompression));
 		}
-#endif
 
 		if (attributeForm->attidentity && includeIdentityDefaults)
 		{
@@ -939,17 +935,6 @@ deparse_shard_reindex_statement(ReindexStmt *origStmt, Oid distrelid, int64 shar
 bool
 IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param)
 {
-#if PG_VERSION_NUM < PG_VERSION_14
-	if (strcmp(param, "concurrently") == 0)
-	{
-		return reindexStmt->concurrent;
-	}
-	else if (strcmp(param, "verbose") == 0)
-	{
-		return reindexStmt->options & REINDEXOPT_VERBOSE;
-	}
-	return false;
-#else
 	DefElem *opt = NULL;
 	foreach_ptr(opt, reindexStmt->params)
 	{
@@ -959,7 +944,6 @@ IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param)
 		}
 	}
 	return false;
-#endif
 }
@@ -974,7 +958,7 @@ AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer)
 	{
 		appendStringInfoString(temp, "VERBOSE");
 	}
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 	char *tableSpaceName = NULL;
 	DefElem *opt = NULL;
 	foreach_ptr(opt, reindexStmt->params)
@@ -997,7 +981,6 @@ AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer)
 			appendStringInfo(temp, "TABLESPACE %s", tableSpaceName);
 		}
 	}
-#endif
 
 	if (temp->len > 0)
 	{
@@ -1627,9 +1610,7 @@ RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier)
 					   spec->rolename;
 		}
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 		case ROLESPEC_CURRENT_ROLE:
-#endif
 		case ROLESPEC_CURRENT_USER:
 		{
 			return withQuoteIdentifier ?

View File

@@ -193,7 +193,7 @@ DeparseAlterSequenceOwnerStmt(Node *node)
 	StringInfoData str = { 0 };
 	initStringInfo(&str);
 
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	AppendAlterSequenceOwnerStmt(&str, stmt);
@@ -208,7 +208,7 @@ DeparseAlterSequenceOwnerStmt(Node *node)
 static void
 AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt)
 {
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 	RangeVar *seq = stmt->relation;
 	char *qualifiedSequenceName = quote_qualified_identifier(seq->schemaname,
 															 seq->relname);
@@ -274,7 +274,7 @@ DeparseAlterSequencePersistenceStmt(Node *node)
 	StringInfoData str = { 0 };
 	initStringInfo(&str);
 
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	AppendAlterSequencePersistenceStmt(&str, stmt);
@@ -289,7 +289,7 @@ DeparseAlterSequencePersistenceStmt(Node *node)
 static void
 AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt)
 {
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 	RangeVar *seq = stmt->relation;
 	char *qualifiedSequenceName = quote_qualified_identifier(seq->schemaname,

View File

@@ -229,7 +229,6 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt)
 }
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 static void
 AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
 {
@@ -257,36 +256,6 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
 }
 
-#else
-static void
-AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
-{
-	ColumnRef *column = NULL;
-	foreach_ptr(column, stmt->exprs)
-	{
-		if (!IsA(column, ColumnRef) || list_length(column->fields) != 1)
-		{
-			ereport(ERROR,
-					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg(
-						 "only simple column references are allowed in CREATE STATISTICS")));
-		}
-
-		char *columnName = NameListToQuotedString(column->fields);
-
-		appendStringInfoString(buf, columnName);
-
-		if (column != llast(stmt->exprs))
-		{
-			appendStringInfoString(buf, ", ");
-		}
-	}
-}
-#endif
 
 static void
 AppendTableName(StringInfo buf, CreateStatsStmt *stmt)
 {

View File

@@ -77,7 +77,7 @@ DeparseAlterTableStmt(Node *node)
 	StringInfoData str = { 0 };
 	initStringInfo(&str);
 
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TABLE);
+	Assert(stmt->objtype == OBJECT_TABLE);
 
 	AppendAlterTableStmt(&str, stmt);
 	return str.data;
@@ -96,7 +96,7 @@ AppendAlterTableStmt(StringInfo buf, AlterTableStmt *stmt)
 											 stmt->relation->relname);
 	ListCell *cmdCell = NULL;
 
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TABLE);
+	Assert(stmt->objtype == OBJECT_TABLE);
 
 	appendStringInfo(buf, "ALTER TABLE %s", identifier);
 	foreach(cmdCell, stmt->cmds)

View File

@@ -122,7 +122,7 @@ DeparseAlterTypeStmt(Node *node)
 	StringInfoData str = { 0 };
 	initStringInfo(&str);
 
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE);
+	Assert(stmt->objtype == OBJECT_TYPE);
 
 	AppendAlterTypeStmt(&str, stmt);
@@ -137,7 +137,7 @@ AppendAlterTypeStmt(StringInfo buf, AlterTableStmt *stmt)
 											 stmt->relation->relname);
 	ListCell *cmdCell = NULL;
 
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE);
+	Assert(stmt->objtype == OBJECT_TYPE);
 
 	appendStringInfo(buf, "ALTER TYPE %s", identifier);
 	foreach(cmdCell, stmt->cmds)

View File

@@ -245,11 +245,7 @@ QualifyCollate(CollateClause *collClause, bool missing_ok)
 	List *objName = NIL;
 	List *objArgs = NIL;
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 	getObjectIdentityParts(&collationAddress, &objName, &objArgs, false);
-#else
-	getObjectIdentityParts(&collationAddress, &objName, &objArgs);
-#endif
 
 	collClause->collname = NIL;
 	char *name = NULL;

View File

@@ -34,7 +34,7 @@ void
 QualifyAlterSequenceOwnerStmt(Node *node)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	RangeVar *seq = stmt->relation;
@@ -62,7 +62,7 @@ void
 QualifyAlterSequencePersistenceStmt(Node *node)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 
 	RangeVar *seq = stmt->relation;

View File

@@ -123,7 +123,7 @@ void
 QualifyAlterTypeStmt(Node *node)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE);
+	Assert(stmt->objtype == OBJECT_TYPE);
 
 	if (stmt->relation->schemaname == NULL)
 	{

File diff suppressed because it is too large

View File

@@ -496,11 +496,7 @@ struct TaskPlacementExecution;
 /* GUC, determining whether Citus opens 1 connection per task */
 bool ForceMaxQueryParallelization = false;
 int MaxAdaptiveExecutorPoolSize = 16;
-#if PG_VERSION_NUM >= PG_VERSION_14
 bool EnableBinaryProtocol = true;
-#else
-bool EnableBinaryProtocol = false;
-#endif
 
 /* GUC, number of ms to wait between opening connections to the same worker */
 int ExecutorSlowStartInterval = 10;

View File

@@ -455,9 +455,9 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript
 										location);
 	copyOptions = lappend(copyOptions, copyOption);
 
-	CopyFromState copyState = BeginCopyFrom_compat(NULL, stubRelation, NULL,
-												   fileName, false, NULL,
-												   NULL, copyOptions);
+	CopyFromState copyState = BeginCopyFrom(NULL, stubRelation, NULL,
+											fileName, false, NULL,
+											NULL, copyOptions);
 
 	while (true)
 	{

View File

@@ -797,11 +797,7 @@ BuildExistingQueryIdHash(void)
 {
 	const int userIdAttributeNumber = 1;
 	const int dbIdAttributeNumber = 2;
-#if PG_VERSION_NUM >= PG_VERSION_14
 	const int queryIdAttributeNumber = 4;
-#else
-	const int queryIdAttributeNumber = 3;
-#endif
 	Datum commandTypeDatum = (Datum) 0;
 	bool missingOK = true;

View File

@@ -896,18 +896,11 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress)
 		return NULL;
 	}
 
-	char *objectDescription = NULL;
-	char *dependencyDescription = NULL;
 	StringInfo errorInfo = makeStringInfo();
 	StringInfo detailInfo = makeStringInfo();
 
-#if PG_VERSION_NUM >= PG_VERSION_14
-	objectDescription = getObjectDescription(objectAddress, false);
-	dependencyDescription = getObjectDescription(undistributableDependency, false);
-#else
-	objectDescription = getObjectDescription(objectAddress);
-	dependencyDescription = getObjectDescription(undistributableDependency);
-#endif
+	char *objectDescription = getObjectDescription(objectAddress, false);
+	char *dependencyDescription = getObjectDescription(undistributableDependency, false);
 
 	/*
 	 * We expect callers to interpret the error returned from this function

View File

@@ -85,12 +85,12 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS)
 	{
 		ereport(ERROR, (errmsg("object still exists"),
 						errdetail("the %s \"%s\" still exists",
-								  getObjectTypeDescription_compat(&address,
-																  /* missingOk: */ false),
-								  getObjectIdentity_compat(&address,
-														   /* missingOk: */ false)),
+								  getObjectTypeDescription(&address,
+														   /* missingOk: */ false),
+								  getObjectIdentity(&address,
+													/* missingOk: */ false)),
 						errhint("drop the object via a DROP command")));
 	}

View File

@@ -916,15 +916,9 @@ MarkObjectsDistributedCreateCommand(List *addresses,
 		int forceDelegation = list_nth_int(forceDelegations, currentObjectCounter);
 		List *names = NIL;
 		List *args = NIL;
-		char *objectType = NULL;
 
-#if PG_VERSION_NUM >= PG_VERSION_14
-		objectType = getObjectTypeDescription(address, false);
+		char *objectType = getObjectTypeDescription(address, false);
 		getObjectIdentityParts(address, &names, &args, false);
-#else
-		objectType = getObjectTypeDescription(address);
-		getObjectIdentityParts(address, &names, &args);
-#endif
 
 		if (!isFirstObject)
 		{

View File

@@ -4031,11 +4031,7 @@ CancelTasksForJob(int64 jobid)
                         errmsg("must be a superuser to cancel superuser tasks")));
     }
     else if (!has_privs_of_role(GetUserId(), taskOwner) &&
-#if PG_VERSION_NUM >= 140000
              !has_privs_of_role(GetUserId(), ROLE_PG_SIGNAL_BACKEND))
-#else
-             !has_privs_of_role(GetUserId(), DEFAULT_ROLE_SIGNAL_BACKENDID))
-#endif
     {
         /* user doesn't have the permissions to cancel this job */
         ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),

View File

@@ -31,11 +31,7 @@
 #include "utils/guc.h"
 #include "utils/hsearch.h"
 #include "utils/memutils.h"
-#if PG_VERSION_NUM < PG_VERSION_13
-#include "utils/hashutils.h"
-#else
 #include "common/hashfn.h"
-#endif
 
 
 /* Config variables managed via guc.c */

View File

@@ -527,13 +527,13 @@ LocalCopyToShard(ShardCopyDestReceiver *copyDest, CopyOutState localCopyOutState
                                          false /* inFromCl */);
     List *options = (isBinaryCopy) ? list_make1(binaryFormatOption) : NULL;
 
-    CopyFromState cstate = BeginCopyFrom_compat(pState, shard,
-                                                NULL /* whereClause */,
-                                                NULL /* fileName */,
-                                                false /* is_program */,
-                                                ReadFromLocalBufferCallback,
-                                                NULL /* attlist (NULL is all columns) */,
-                                                options);
+    CopyFromState cstate = BeginCopyFrom(pState, shard,
+                                         NULL /* whereClause */,
+                                         NULL /* fileName */,
+                                         false /* is_program */,
+                                         ReadFromLocalBufferCallback,
+                                         NULL /* attlist (NULL is all columns) */,
+                                         options);
     CopyFrom(cstate);
     EndCopyFrom(cstate);
     resetStringInfo(localCopyOutState->fe_msgbuf);

View File

@@ -861,8 +861,8 @@ RouterModifyTaskForShardInterval(Query *originalQuery,
      * Note that this is only the case with PG14 as the parameter doesn't exist
      * prior to that.
      */
-    shardRestrictionList = make_simple_restrictinfo_compat(NULL,
-                                                           (Expr *) shardOpExpressions);
+    shardRestrictionList = make_simple_restrictinfo(NULL,
+                                                    (Expr *) shardOpExpressions);
     extendedBaseRestrictInfo = lappend(extendedBaseRestrictInfo,
                                        shardRestrictionList);
 

View File

@@ -1101,8 +1101,8 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
     TupleDesc tupleDescriptor = NULL;
     Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);
     DestReceiver *tupleStoreDest = CreateTuplestoreDestReceiver();
-    SetTuplestoreDestReceiverParams_compat(tupleStoreDest, tupleStore,
-                                           CurrentMemoryContext, false, NULL, NULL);
+    SetTuplestoreDestReceiverParams(tupleStoreDest, tupleStore,
+                                    CurrentMemoryContext, false, NULL, NULL);
 
     List *parseTreeList = pg_parse_query(queryString);
     if (list_length(parseTreeList) != 1)
@@ -1126,15 +1126,9 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
     Query *analyzedQuery = parse_analyze_varparams_compat(parseTree, queryString,
                                                           &paramTypes, &numParams, NULL);
 
-#if PG_VERSION_NUM >= PG_VERSION_14
-
     /* pg_rewrite_query is a wrapper around QueryRewrite with some debugging logic */
     List *queryList = pg_rewrite_query(analyzedQuery);
-#else
-
-    /* pg_rewrite_query is not yet public in PostgreSQL 13 */
-    List *queryList = QueryRewrite(analyzedQuery);
-#endif
 
     if (list_length(queryList) != 1)
     {
         ereport(ERROR, (errmsg("cannot EXPLAIN ANALYZE a query rewritten "

View File

@@ -1855,11 +1855,7 @@ MasterAggregateExpression(Aggref *originalAggregate,
     {
         /* array_cat_agg() takes anyarray as input */
         catAggregateName = ARRAY_CAT_AGGREGATE_NAME;
-#if PG_VERSION_NUM >= PG_VERSION_14
         catInputType = ANYCOMPATIBLEARRAYOID;
-#else
-        catInputType = ANYARRAYOID;
-#endif
     }
     else if (aggregateType == AGGREGATE_JSONB_AGG ||
              aggregateType == AGGREGATE_JSONB_OBJECT_AGG)
@@ -1897,8 +1893,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
 
     if (aggregateType == AGGREGATE_ARRAY_AGG)
     {
-#if PG_VERSION_NUM >= PG_VERSION_14
-
         /*
          * Postgres expects the type of the array here such as INT4ARRAYOID.
          * Hence we set it to workerReturnType. If we set this to
@@ -1906,9 +1900,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
          * "argument declared anycompatiblearray is not an array but type anycompatiblearray"
          */
         newMasterAggregate->aggargtypes = list_make1_oid(workerReturnType);
-#else
-        newMasterAggregate->aggargtypes = list_make1_oid(ANYARRAYOID);
-#endif
     }
     else
     {
@@ -3625,8 +3616,8 @@ static Oid
 CitusFunctionOidWithSignature(char *functionName, int numargs, Oid *argtypes)
 {
     List *aggregateName = list_make2(makeString("pg_catalog"), makeString(functionName));
-    FuncCandidateList clist = FuncnameGetCandidates_compat(aggregateName, numargs, NIL,
-                                                           false, false, false, true);
+    FuncCandidateList clist = FuncnameGetCandidates(aggregateName, numargs, NIL,
+                                                    false, false, false, true);
 
     for (; clist; clist = clist->next)
     {

View File

@@ -152,10 +152,8 @@ static List * ExtractInsertValuesList(Query *query, Var *partitionColumn);
 static DeferredErrorMessage * DeferErrorIfUnsupportedRouterPlannableSelectQuery(
     Query *query);
 static DeferredErrorMessage * ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree);
-#if PG_VERSION_NUM >= PG_VERSION_14
 static DeferredErrorMessage * ErrorIfQueryHasCTEWithSearchClause(Query *queryTree);
 static bool ContainsSearchClauseWalker(Node *node, void *context);
-#endif
 static bool SelectsFromDistributedTable(List *rangeTableList, Query *query);
 static ShardPlacement * CreateDummyPlacement(bool hasLocalRelation);
 static ShardPlacement * CreateLocalDummyPlacement();
@@ -1118,14 +1116,12 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
         }
     }
 
-#if PG_VERSION_NUM >= PG_VERSION_14
     DeferredErrorMessage *CTEWithSearchClauseError =
         ErrorIfQueryHasCTEWithSearchClause(originalQuery);
     if (CTEWithSearchClauseError != NULL)
     {
         return CTEWithSearchClauseError;
     }
-#endif
 
     return NULL;
 }
@@ -3758,14 +3754,12 @@ DeferErrorIfUnsupportedRouterPlannableSelectQuery(Query *query)
                              NULL, NULL);
     }
 
-#if PG_VERSION_NUM >= PG_VERSION_14
     DeferredErrorMessage *CTEWithSearchClauseError =
         ErrorIfQueryHasCTEWithSearchClause(query);
     if (CTEWithSearchClauseError != NULL)
     {
         return CTEWithSearchClauseError;
     }
-#endif
 
     return ErrorIfQueryHasUnroutableModifyingCTE(query);
 }
@@ -3900,8 +3894,6 @@ ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree)
 }
 
 
-#if PG_VERSION_NUM >= PG_VERSION_14
-
 /*
  * ErrorIfQueryHasCTEWithSearchClause checks if the query contains any common table
  * expressions with search clause and errors out if it does.
@@ -3948,9 +3940,6 @@ ContainsSearchClauseWalker(Node *node, void *context)
 }
 
 
-#endif
-
-
 /*
  * get_all_actual_clauses
  *
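
The SEARCH clause guarded against above is a PG14 addition (WITH ... SEARCH DEPTH | BREADTH FIRST BY ... SET ...), which the parser records in CommonTableExpr->search_clause; Citus cannot route such CTEs, hence the deferred error. As a rough illustration of the walker shape (a sketch under those assumptions, not the commit's exact ContainsSearchClauseWalker):

#include "postgres.h"
#include "nodes/nodeFuncs.h"
#include "nodes/parsenodes.h"

/* Illustrative sketch: true if any node in the tree is a CTE with a SEARCH clause. */
static bool
SketchContainsSearchClauseWalker(Node *node, void *context)
{
    if (node == NULL)
    {
        return false;
    }

    if (IsA(node, CommonTableExpr) &&
        ((CommonTableExpr *) node)->search_clause != NULL)
    {
        return true;
    }

    return expression_tree_walker(node, SketchContainsSearchClauseWalker, context);
}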

View File

@@ -2143,8 +2143,8 @@ GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry,
          * If the restriction involves multiple tables, we cannot add it to
          * input relation's expression list.
          */
-        Relids varnos = pull_varnos_compat(relationRestriction->plannerInfo,
-                                           (Node *) restrictionClause);
+        Relids varnos = pull_varnos(relationRestriction->plannerInfo,
+                                    (Node *) restrictionClause);
         if (bms_num_members(varnos) != 1)
         {
             continue;

View File

@@ -1536,7 +1536,7 @@ CreateSubscriptions(MultiConnection *sourceConnection,
                          quote_identifier(target->publication->name),
                          quote_identifier(target->replicationSlot->name));
 
-        if (EnableBinaryProtocol && PG_VERSION_NUM >= PG_VERSION_14)
+        if (EnableBinaryProtocol)
         {
             appendStringInfoString(createSubscriptionCommand, ", binary=true)");
         }
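
With the version guard gone, whether the subscription requests binary mode now depends only on the citus.enable_binary_protocol GUC. Roughly, the tail of the command assembly reduces to the shape below (a sketch with the surrounding code elided; the else branch closing the option list is assumed, not shown in this hunk):

if (EnableBinaryProtocol)
{
    /* close the WITH (...) option list requesting binary row transfer */
    appendStringInfoString(createSubscriptionCommand, ", binary=true)");
}
else
{
    appendStringInfoString(createSubscriptionCommand, ")");
}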

View File

@@ -1215,11 +1215,7 @@ RegisterCitusConfigVariables(void)
                              "Enables communication between nodes using binary protocol when possible"),
         NULL,
         &EnableBinaryProtocol,
-#if PG_VERSION_NUM >= PG_VERSION_14
         true,
-#else
-        false,
-#endif
         PGC_USERSET,
         GUC_STANDARD,
         NULL, NULL, NULL);
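
This hunk sits inside the DefineCustomBoolVariable() call that registers citus.enable_binary_protocol; with PG13 gone the boot value is unconditionally true. Reassembled for readability (a sketch abridged from this hunk, not copied verbatim from the file):

DefineCustomBoolVariable(
    "citus.enable_binary_protocol",
    gettext_noop("Enables communication between nodes using binary protocol "
                 "when possible"),
    NULL,
    &EnableBinaryProtocol,
    true,               /* single boot value now that PG13 is unsupported */
    PGC_USERSET,
    GUC_STANDARD,
    NULL, NULL, NULL);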

View File

@@ -169,7 +169,6 @@ fake_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
 }
 
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 static TransactionId
 fake_index_delete_tuples(Relation rel,
                          TM_IndexDeleteOp *delstate)
@@ -179,20 +178,6 @@ fake_index_delete_tuples(Relation rel,
 }
 
 
-#else
-static TransactionId
-fake_compute_xid_horizon_for_tuples(Relation rel,
-                                    ItemPointerData *tids,
-                                    int nitems)
-{
-    elog(ERROR, "fake_compute_xid_horizon_for_tuples not implemented");
-    return InvalidTransactionId;
-}
-#endif
-
-
 /* ----------------------------------------------------------------------------
  *  Functions for manipulations of physical tuples for fake AM.
  * ----------------------------------------------------------------------------
@@ -568,11 +553,7 @@ static const TableAmRoutine fake_methods = {
     .tuple_get_latest_tid = fake_get_latest_tid,
     .tuple_tid_valid = fake_tuple_tid_valid,
     .tuple_satisfies_snapshot = fake_tuple_satisfies_snapshot,
-#if PG_VERSION_NUM >= PG_VERSION_14
     .index_delete_tuples = fake_index_delete_tuples,
-#else
-    .compute_xid_horizon_for_tuples = fake_compute_xid_horizon_for_tuples,
-#endif
 
     .relation_set_new_filenode = fake_relation_set_new_filenode,
     .relation_nontransactional_truncate = fake_relation_nontransactional_truncate,

View File

@@ -48,8 +48,8 @@ MemoryContextTotalSpace(MemoryContext context)
     Size totalSpace = 0;
 
     MemoryContextCounters totals = { 0 };
-    TopTransactionContext->methods->stats_compat(TopTransactionContext, NULL, NULL,
-                                                 &totals, true);
+    TopTransactionContext->methods->stats(TopTransactionContext, NULL, NULL,
+                                          &totals, true);
     totalSpace += totals.totalspace;
 
     for (MemoryContext child = context->firstchild;

View File

@@ -503,11 +503,7 @@ UserHasPermissionToViewStatsOf(Oid currentUserId, Oid backendOwnedId)
     }
 
     if (is_member_of_role(currentUserId,
-#if PG_VERSION_NUM >= PG_VERSION_14
                           ROLE_PG_READ_ALL_STATS))
-#else
-                          DEFAULT_ROLE_READ_ALL_STATS))
-#endif
     {
         return true;
     }

View File

@@ -664,7 +664,7 @@ IsProcessWaitingForSafeOperations(PGPROC *proc)
         return false;
     }
 
-    if (pgproc_statusflags_compat(proc) & PROC_IS_AUTOVACUUM)
+    if (proc->statusFlags & PROC_IS_AUTOVACUUM)
     {
         return true;
     }

View File

@@ -1436,13 +1436,11 @@ error_severity(int elevel)
             break;
         }
 
-#if PG_VERSION_NUM >= PG_VERSION_14
         case WARNING_CLIENT_ONLY:
         {
             prefix = gettext_noop("WARNING");
             break;
         }
-#endif
 
         case ERROR:
         {

View File

@@ -528,9 +528,9 @@ FixFunctionArgumentsWalker(Node *expr, void *context)
             elog(ERROR, "cache lookup failed for function %u", funcExpr->funcid);
         }
 
-        funcExpr->args = expand_function_arguments_compat(funcExpr->args, false,
-                                                          funcExpr->funcresulttype,
-                                                          func_tuple);
+        funcExpr->args = expand_function_arguments(funcExpr->args, false,
+                                                   funcExpr->funcresulttype,
+                                                   func_tuple);
 
         ReleaseSysCache(func_tuple);
     }

View File

@@ -19,11 +19,6 @@
  * done before including libpq.h.
  */
 #include "distributed/pg_version_constants.h"
-#if PG_VERSION_NUM < PG_VERSION_14
-#ifndef OPENSSL_API_COMPAT
-#define OPENSSL_API_COMPAT 0x1000100L
-#endif
-#endif
 
 #include "distributed/connection_management.h"
 #include "distributed/memutils.h"

View File

@@ -46,7 +46,7 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume
     const bool findVariadics = false;
     const bool findDefaults = false;
 
-    FuncCandidateList functionList = FuncnameGetCandidates_compat(
+    FuncCandidateList functionList = FuncnameGetCandidates(
         qualifiedFunctionNameList,
         argumentCount,
         argumentList,

View File

@@ -118,9 +118,7 @@ ListToHashSet(List *itemList, Size keySize, bool isStringList)
 
     if (isStringList)
     {
-#if PG_VERSION_NUM >= PG_VERSION_14
         flags |= HASH_STRINGS;
-#endif
     }
     else
     {
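
Background for this hunk: PG14 made key hashing explicit in dynahash, so string-keyed tables must pass HASH_STRINGS to hash_create() (binary keys pass HASH_BLOBS); before PG14 string hashing was the implicit default and the flag did not exist. A minimal sketch of a string-keyed set, with hypothetical names:

HASHCTL info;
memset(&info, 0, sizeof(info));
info.keysize = NAMEDATALEN;
info.entrysize = NAMEDATALEN;

/* HASH_STRINGS is required for string keys on PG14+ */
HTAB *stringSet = hash_create("string set", 32, &info,
                              HASH_ELEM | HASH_STRINGS);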

View File

@@ -18,9 +18,7 @@
 
 #include "utils/builtins.h"
 
-#if PG_VERSION_NUM >= PG_VERSION_14
 #include "common/cryptohash.h"
-#endif
 
 
 /*

View File

@@ -1023,7 +1023,7 @@ IsParentTable(Oid relationId)
 Oid
 PartitionParentOid(Oid partitionOid)
 {
-    Oid partitionParentOid = get_partition_parent_compat(partitionOid, false);
+    Oid partitionParentOid = get_partition_parent(partitionOid, false);
 
     return partitionParentOid;
 }
@@ -1074,7 +1074,7 @@ PartitionList(Oid parentRelationId)
         ereport(ERROR, (errmsg("\"%s\" is not a parent table", relationName)));
     }
 
-    PartitionDesc partDesc = RelationGetPartitionDesc_compat(rel, true);
+    PartitionDesc partDesc = RelationGetPartitionDesc(rel, true);
     Assert(partDesc != NULL);
 
     int partitionCount = partDesc->nparts;
@@ -1107,7 +1107,7 @@ GenerateDetachPartitionCommand(Oid partitionTableId)
         ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName)));
     }
 
-    Oid parentId = get_partition_parent_compat(partitionTableId, false);
+    Oid parentId = get_partition_parent(partitionTableId, false);
     char *tableQualifiedName = generate_qualified_relation_name(partitionTableId);
     char *parentTableQualifiedName = generate_qualified_relation_name(parentId);
@@ -1221,7 +1221,7 @@ GenerateAlterTableAttachPartitionCommand(Oid partitionTableId)
         ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName)));
     }
 
-    Oid parentId = get_partition_parent_compat(partitionTableId, false);
+    Oid parentId = get_partition_parent(partitionTableId, false);
     char *tableQualifiedName = generate_qualified_relation_name(partitionTableId);
     char *parentTableQualifiedName = generate_qualified_relation_name(parentId);

View File

@@ -22,29 +22,6 @@
     ExecARDeleteTriggers(a, b, c, d, e)
 #endif
 
-#if PG_VERSION_NUM >= PG_VERSION_14
-#define ColumnarProcessUtility_compat(a, b, c, d, e, f, g, h) \
-    ColumnarProcessUtility(a, b, c, d, e, f, g, h)
-#define PrevProcessUtilityHook_compat(a, b, c, d, e, f, g, h) \
-    PrevProcessUtilityHook(a, b, c, d, e, f, g, h)
-#define GetOldestNonRemovableTransactionId_compat(a, b) \
-    GetOldestNonRemovableTransactionId(a)
-#define ExecSimpleRelationInsert_compat(a, b, c) \
-    ExecSimpleRelationInsert(a, b, c)
-#define index_insert_compat(a, b, c, d, e, f, g, h) \
-    index_insert(a, b, c, d, e, f, g, h)
-#else
-#define ColumnarProcessUtility_compat(a, b, c, d, e, f, g, h) \
-    ColumnarProcessUtility(a, b, d, e, f, g, h)
-#define PrevProcessUtilityHook_compat(a, b, c, d, e, f, g, h) \
-    PrevProcessUtilityHook(a, b, d, e, f, g, h)
-#define GetOldestNonRemovableTransactionId_compat(a, b) GetOldestXmin(a, b)
-#define ExecSimpleRelationInsert_compat(a, b, c) \
-    ExecSimpleRelationInsert(b, c)
-#define index_insert_compat(a, b, c, d, e, f, g, h) \
-    index_insert(a, b, c, d, e, f, h)
-#endif
-
 #define ACLCHECK_OBJECT_TABLE OBJECT_TABLE
 
 #define ExplainPropertyLong(qlabel, value, es) \

View File

@@ -31,12 +31,7 @@
 
 typedef enum CitusCopyDest
 {
     COPY_FILE,                  /* to/from file (or a piped program) */
-#if PG_VERSION_NUM >= PG_VERSION_14
     COPY_FRONTEND,              /* to frontend */
-#else
-    COPY_OLD_FE,                /* to/from frontend (2.0 protocol) */
-    COPY_NEW_FE,                /* to/from frontend (3.0 protocol) */
-#endif
     COPY_CALLBACK               /* to/from callback function */
 } CitusCopyDest;

View File

@@ -79,9 +79,7 @@ typedef struct DDLJob
 extern ProcessUtility_hook_type PrevProcessUtility;
 
 extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
-#if PG_VERSION_NUM >= PG_VERSION_14
                                  bool readOnlyTree,
-#endif
                                  ProcessUtilityContext context, ParamListInfo params,
                                  struct QueryEnvironment *queryEnv, DestReceiver *dest,
                                  QueryCompletion *completionTag

View File

@@ -353,7 +353,4 @@ extern bool CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events,
 extern double MillisecondsPassedSince(instr_time moment);
 extern long MillisecondsToTimeout(instr_time start, long msAfterStart);
 
-#if PG_VERSION_NUM < 140000
-extern void WarmUpConnParamsHash(void);
-#endif
 #endif   /* CONNECTION_MANAGMENT_H */

View File

@@ -11,7 +11,6 @@
 #ifndef PG_VERSION_CONSTANTS
 #define PG_VERSION_CONSTANTS
 
-#define PG_VERSION_13 130000
 #define PG_VERSION_14 140000
 #define PG_VERSION_15 150000
 #define PG_VERSION_16 160000

View File

@@ -61,8 +61,7 @@ pg_strtoint64(char *s)
  * We want to use it in all versions. So we backport it ourselves in earlier
  * versions, and rely on the Postgres provided version in the later versions.
  */
-#if PG_VERSION_NUM >= PG_VERSION_13 && PG_VERSION_NUM < 130010 \
-    || PG_VERSION_NUM >= PG_VERSION_14 && PG_VERSION_NUM < 140007
+#if PG_VERSION_NUM < 140007
 static inline SMgrRelation
 RelationGetSmgr(Relation rel)
 {
@@ -84,67 +83,6 @@ RelationGetSmgr(Relation rel)
 #endif
 
 
-#if PG_VERSION_NUM >= PG_VERSION_14
-#define AlterTableStmtObjType_compat(a) ((a)->objtype)
-#define getObjectTypeDescription_compat(a, b) getObjectTypeDescription(a, b)
-#define getObjectIdentity_compat(a, b) getObjectIdentity(a, b)
-
-/* for MemoryContextMethods->stats */
-#define stats_compat(a, b, c, d, e) stats(a, b, c, d, e)
-#define FuncnameGetCandidates_compat(a, b, c, d, e, f, g) \
-    FuncnameGetCandidates(a, b, c, d, e, f, g)
-#define expand_function_arguments_compat(a, b, c, d) expand_function_arguments(a, b, c, d)
-#define BeginCopyFrom_compat(a, b, c, d, e, f, g, h) BeginCopyFrom(a, b, c, d, e, f, g, h)
-#define standard_ProcessUtility_compat(a, b, c, d, e, f, g, h) \
-    standard_ProcessUtility(a, b, c, d, e, f, g, h)
-#define ProcessUtility_compat(a, b, c, d, e, f, g, h) \
-    ProcessUtility(a, b, c, d, e, f, g, h)
-#define PrevProcessUtility_compat(a, b, c, d, e, f, g, h) \
-    PrevProcessUtility(a, b, c, d, e, f, g, h)
-#define SetTuplestoreDestReceiverParams_compat(a, b, c, d, e, f) \
-    SetTuplestoreDestReceiverParams(a, b, c, d, e, f)
-#define pgproc_statusflags_compat(pgproc) ((pgproc)->statusFlags)
-#define get_partition_parent_compat(a, b) get_partition_parent(a, b)
-#define RelationGetPartitionDesc_compat(a, b) RelationGetPartitionDesc(a, b)
-#define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(a, b)
-#define pull_varnos_compat(a, b) pull_varnos(a, b)
-#else
-#define AlterTableStmtObjType_compat(a) ((a)->relkind)
-#define F_NEXTVAL F_NEXTVAL_OID
-#define ROLE_PG_MONITOR DEFAULT_ROLE_MONITOR
-#define PROC_WAIT_STATUS_WAITING STATUS_WAITING
-#define getObjectTypeDescription_compat(a, b) getObjectTypeDescription(a)
-#define getObjectIdentity_compat(a, b) getObjectIdentity(a)
-
-/* for MemoryContextMethods->stats */
-#define stats_compat(a, b, c, d, e) stats(a, b, c, d)
-#define FuncnameGetCandidates_compat(a, b, c, d, e, f, g) \
-    FuncnameGetCandidates(a, b, c, d, e, g)
-#define expand_function_arguments_compat(a, b, c, d) expand_function_arguments(a, c, d)
-#define VacOptValue VacOptTernaryValue
-#define VACOPTVALUE_UNSPECIFIED VACOPT_TERNARY_DEFAULT
-#define VACOPTVALUE_DISABLED VACOPT_TERNARY_DISABLED
-#define VACOPTVALUE_ENABLED VACOPT_TERNARY_ENABLED
-#define CopyFromState CopyState
-#define BeginCopyFrom_compat(a, b, c, d, e, f, g, h) BeginCopyFrom(a, b, d, e, f, g, h)
-#define standard_ProcessUtility_compat(a, b, c, d, e, f, g, h) \
-    standard_ProcessUtility(a, b, d, e, f, g, h)
-#define ProcessUtility_compat(a, b, c, d, e, f, g, h) ProcessUtility(a, b, d, e, f, g, h)
-#define PrevProcessUtility_compat(a, b, c, d, e, f, g, h) \
-    PrevProcessUtility(a, b, d, e, f, g, h)
-#define COPY_FRONTEND COPY_NEW_FE
-#define SetTuplestoreDestReceiverParams_compat(a, b, c, d, e, f) \
-    SetTuplestoreDestReceiverParams(a, b, c, d)
-#define pgproc_statusflags_compat(pgproc) \
-    ((&ProcGlobal->allPgXact[(pgproc)->pgprocno])->vacuumFlags)
-#define get_partition_parent_compat(a, b) get_partition_parent(a)
-#define RelationGetPartitionDesc_compat(a, b) RelationGetPartitionDesc(a)
-#define PQ_LARGE_MESSAGE_LIMIT 0
-#define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(b)
-#define pull_varnos_compat(a, b) pull_varnos(b)
-#define ROLE_PG_READ_ALL_STATS DEFAULT_ROLE_READ_ALL_STATS
-#endif
-
 #define SetListCellPtr(a, b) ((a)->ptr_value = (b))
 #define RangeTableEntryFromNSItem(a) ((a)->p_rte)
 #define fcGetArgValue(fc, n) ((fc)->args[n].value)
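
The surviving guard keeps the RelationGetSmgr() backport only for PG14 builds older than 14.7, where the server does not yet provide the helper; the function body elided by the hunk above mirrors the upstream definition, roughly:

static inline SMgrRelation
RelationGetSmgr(Relation rel)
{
    if (unlikely(rel->rd_smgr == NULL))
    {
        smgrsetowner(&(rel->rd_smgr), smgropen(rel->rd_node, rel->rd_backend));
    }
    return rel->rd_smgr;
}

With the large PG14 branch of _compat macros deleted, every former _compat caller now spells the PG14+ name directly, as the call-site hunks earlier in this commit show.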

View File

@@ -98,34 +98,7 @@ s/of relation ".*" violates not-null constraint/violates not-null constraint/g
 s/partition ".*" would be violated by some row/partition would be violated by some row/g
 s/of relation ".*" contains null values/contains null values/g
 
-#if (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14)
-# (This is not preprocessor directive, but a reminder for the developer that will drop PG13 support )
-
-# libpq message changes for minor versions of pg13
-# We ignore multiline error messages, and substitute first line with a single line
-# alternative that is used in some older libpq versions.
-s/(ERROR: |WARNING: |error:) server closed the connection unexpectedly/\1 connection not open/g
-/^\s*This probably means the server terminated abnormally$/d
-/^\s*before or while processing the request.$/d
-/^\s*connection not open$/d
-
-s/ERROR: fake_fetch_row_version not implemented/ERROR: fake_tuple_update not implemented/g
-s/ERROR: COMMIT is not allowed in an SQL function/ERROR: COMMIT is not allowed in a SQL function/g
-s/ERROR: ROLLBACK is not allowed in an SQL function/ERROR: ROLLBACK is not allowed in a SQL function/g
-/.*Async-Capable.*/d
-/.*Async Capable.*/d
-/Parent Relationship/d
-/Parent-Relationship/d
-s/function array_cat_agg\(anyarray\) anyarray/function array_cat_agg\(anycompatiblearray\) anycompatiblearray/g
-s/function array_cat_agg\(anyarray\)/function array_cat_agg\(anycompatiblearray\)/g
-s/TRIM\(BOTH FROM value\)/btrim\(value\)/g
-/DETAIL: Subqueries are not supported in policies on distributed tables/d
-s/ERROR: unexpected non-SELECT command in SubLink/ERROR: cannot create policy/g
-
-# PG13 changes bgworker sigterm message, we can drop that line with PG13 drop
-s/(FATAL: terminating).*Citus Background Task Queue Executor.*(due to administrator command)\+/\1 connection \2 \+/g
-#endif /* (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) */
-
+s/(Citus Background Task Queue Executor: regression\/postgres for \()[0-9]+\/[0-9]+\)/\1xxxxx\/xxxxx\)/g
 
 # Changed outputs after minor bump to PG14.5 and PG13.8
 s/(ERROR: |WARNING: |error:) invalid socket/\1 connection not open/g
@@ -135,9 +108,18 @@ s/(ERROR: |WARNING: |error:) invalid socket/\1 connection not open/g
 
 # pg15 changes
 # can be removed when dropping PG13&14 support
+#if (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM < PG_VERSION_15)
+# (This is not preprocessor directive, but a reminder for the developer that will drop PG14 support )
 s/is not a PostgreSQL server process/is not a PostgreSQL backend process/g
 s/ AS "\?column\?"//g
 s/".*\.(.*)": (found .* removable)/"\1": \2/g
+# We ignore multiline error messages, and substitute first line with a single line
+# alternative that is used in some older libpq versions.
+s/(ERROR: |WARNING: |error:) server closed the connection unexpectedly/\1 connection not open/g
+/^\s*This probably means the server terminated abnormally$/d
+/^\s*before or while processing the request.$/d
+/^\s*connection not open$/d
+#endif /* (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) */
 
 # intermediate_results
 s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g

View File

@@ -166,6 +166,7 @@ DEPS = {
             "multi_table_ddl",
         ],
     ),
+    "grant_on_schema_propagation": TestDeps("minimal_schedule"),
 }

View File

@@ -495,11 +495,11 @@ SELECT task_id, status, retry_count, message FROM pg_dist_background_task
 ORDER BY task_id; -- show that all tasks are runnable by retry policy after termination signal
  task_id |  status  | retry_count |                                 message
 ---------------------------------------------------------------------
- 1450019 | runnable |           1 | FATAL:  terminating connection due to administrator command                                 +
-         |          |             | CONTEXT:  Citus Background Task Queue Executor: regression/postgres for (1450011/1450019)   +
+ 1450019 | runnable |           1 | FATAL:  terminating background worker "Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx)" due to administrator command+
+         |          |             | CONTEXT:  Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx)       +
          |          |             |
- 1450020 | runnable |           1 | FATAL:  terminating connection due to administrator command                                 +
-         |          |             | CONTEXT:  Citus Background Task Queue Executor: regression/postgres for (1450012/1450020)   +
+ 1450020 | runnable |           1 | FATAL:  terminating background worker "Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx)" due to administrator command+
+         |          |             | CONTEXT:  Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx)       +
          |          |             |
 (2 rows)

View File

@@ -85,17 +85,14 @@ SET search_path TO cpu_priority;
 -- in their CREATE SUBSCRIPTION commands.
 SET citus.log_remote_commands TO ON;
 SET citus.grep_remote_commands = '%CREATE SUBSCRIPTION%';
--- We disable binary protocol, so we have consistent output between PG13 and
--- PG14, beacuse PG13 doesn't support binary logical replication.
-SET citus.enable_binary_protocol = false;
 SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
  master_move_shard_placement
 ---------------------------------------------------------------------
@@ -104,13 +101,13 @@ DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
 
 SET citus.cpu_priority_for_logical_replication_senders = 15;
 SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical');
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
  master_move_shard_placement
@@ -119,13 +116,13 @@ DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
 
 SET citus.max_high_priority_background_processes = 3;
 SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
  master_move_shard_placement
@@ -145,21 +142,21 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
     ARRAY['-1500000000'],
     ARRAY[:worker_1_node, :worker_2_node],
     'force_logical');
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
+NOTICE:  issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
 DETAIL:  on server postgres@localhost:xxxxx connectionId: xxxxxxx
  citus_split_shard_by_split_points
 ---------------------------------------------------------------------

View File

@@ -1,11 +1,3 @@
--- This test file has an alternative output because of error messages vary for PG13
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13;
- server_version_le_13
----------------------------------------------------------------------
- f
-(1 row)
-
 CREATE SCHEMA generated_identities;
 SET search_path TO generated_identities;
 SET client_min_messages to ERROR;

View File

@ -1,431 +0,0 @@
-- This test file has an alternative output because of error messages vary for PG13
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13;
server_version_le_13
---------------------------------------------------------------------
t
(1 row)
CREATE SCHEMA generated_identities;
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
SET citus.shard_replication_factor TO 1;
SELECT 1 from citus_add_node('localhost', :master_port, groupId=>0);
?column?
---------------------------------------------------------------------
1
(1 row)
-- smallint identity column can not be distributed
CREATE TABLE smallint_identity_column (
a smallint GENERATED BY DEFAULT AS IDENTITY
);
SELECT create_distributed_table('smallint_identity_column', 'a');
ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column
HINT: Use bigint identity column instead.
SELECT create_distributed_table_concurrently('smallint_identity_column', 'a');
ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column
HINT: Use bigint identity column instead.
SELECT create_reference_table('smallint_identity_column');
ERROR: cannot complete operation on a table with identity column
SELECT citus_add_local_table_to_metadata('smallint_identity_column');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
DROP TABLE smallint_identity_column;
-- int identity column can not be distributed
CREATE TABLE int_identity_column (
a int GENERATED BY DEFAULT AS IDENTITY
);
SELECT create_distributed_table('int_identity_column', 'a');
ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column
HINT: Use bigint identity column instead.
SELECT create_distributed_table_concurrently('int_identity_column', 'a');
ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column
HINT: Use bigint identity column instead.
SELECT create_reference_table('int_identity_column');
ERROR: cannot complete operation on a table with identity column
SELECT citus_add_local_table_to_metadata('int_identity_column');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
DROP TABLE int_identity_column;
RESET citus.shard_replication_factor;
CREATE TABLE bigint_identity_column (
a bigint GENERATED BY DEFAULT AS IDENTITY,
b int
);
SELECT citus_add_local_table_to_metadata('bigint_identity_column');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
DROP TABLE bigint_identity_column;
CREATE TABLE bigint_identity_column (
a bigint GENERATED BY DEFAULT AS IDENTITY,
b int
);
SELECT create_distributed_table('bigint_identity_column', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\d bigint_identity_column
Table "generated_identities.bigint_identity_column"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | integer | | |
\c - - - :worker_1_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO bigint_identity_column (b)
SELECT s FROM generate_series(1,10) s;
\d generated_identities.bigint_identity_column
Table "generated_identities.bigint_identity_column"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | integer | | |
\c - - - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO bigint_identity_column (b)
SELECT s FROM generate_series(11,20) s;
SELECT * FROM bigint_identity_column ORDER BY B ASC;
a | b
---------------------------------------------------------------------
3940649673949185 | 1
3940649673949186 | 2
3940649673949187 | 3
3940649673949188 | 4
3940649673949189 | 5
3940649673949190 | 6
3940649673949191 | 7
3940649673949192 | 8
3940649673949193 | 9
3940649673949194 | 10
1 | 11
2 | 12
3 | 13
4 | 14
5 | 15
6 | 16
7 | 17
8 | 18
9 | 19
10 | 20
(20 rows)
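The widely spaced values in the worker-generated rows are not arbitrary: Citus hands every node a disjoint slice of the 64-bit sequence space so identity values cannot collide across nodes, and the offset appears to be the node's group ID shifted into the high bits (3940649673949185 = (14::bigint << 48) + 1). A quick sanity check, a sketch rather than part of the test:
-- recover the node group that generated each identity value
SELECT a, a >> 48 AS generating_group_id
FROM bigint_identity_column
ORDER BY b;
Rows routed through the coordinator fall in group 0's slice, which is why they start at 1.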
-- table with identity column cannot be altered.
SELECT alter_distributed_table('bigint_identity_column', 'b');
ERROR: cannot complete operation on a table with identity column
-- table with identity column cannot be undistributed.
SELECT undistribute_table('bigint_identity_column');
ERROR: cannot complete operation on a table with identity column
DROP TABLE bigint_identity_column;
-- create a partitioned table for testing.
CREATE TABLE partitioned_table (
a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10),
c int
)
PARTITION BY RANGE (c);
CREATE TABLE partitioned_table_1_50 PARTITION OF partitioned_table FOR VALUES FROM (1) TO (50);
CREATE TABLE partitioned_table_50_500 PARTITION OF partitioned_table FOR VALUES FROM (50) TO (1000);
SELECT create_distributed_table('partitioned_table', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\d partitioned_table
Partitioned table "generated_identities.partitioned_table"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | bigint | | not null | generated always as identity
c | integer | | |
Partition key: RANGE (c)
Number of partitions: 2 (Use \d+ to list them.)
\c - - - :worker_1_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
\d generated_identities.partitioned_table
Partitioned table "generated_identities.partitioned_table"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | bigint | | not null | generated always as identity
c | integer | | |
Partition key: RANGE (c)
Number of partitions: 2 (Use \d+ to list them.)
insert into partitioned_table (c) values (1);
insert into partitioned_table (c) SELECT 2;
INSERT INTO partitioned_table (c)
SELECT s FROM generate_series(3,7) s;
\c - - - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO partitioned_table (c)
SELECT s FROM generate_series(10,20) s;
INSERT INTO partitioned_table (a,c) VALUES (998,998);
INSERT INTO partitioned_table (a,b,c) OVERRIDING SYSTEM VALUE VALUES (999,999,999);
SELECT * FROM partitioned_table ORDER BY c ASC;
a | b | c
---------------------------------------------------------------------
3940649673949185 | 3940649673949185 | 1
3940649673949195 | 3940649673949195 | 2
3940649673949205 | 3940649673949205 | 3
3940649673949215 | 3940649673949215 | 4
3940649673949225 | 3940649673949225 | 5
3940649673949235 | 3940649673949235 | 6
3940649673949245 | 3940649673949245 | 7
10 | 10 | 10
20 | 20 | 11
30 | 30 | 12
40 | 40 | 13
50 | 50 | 14
60 | 60 | 15
70 | 70 | 16
80 | 80 | 17
90 | 90 | 18
100 | 100 | 19
110 | 110 | 20
998 | 120 | 998
999 | 999 | 999
(20 rows)
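Note that INCREMENT BY 10 survives distribution, while START WITH appears to be remapped on each worker to the start of that node's slice: the first worker value above is (14::bigint << 48) + 1 and subsequent ones step by 10, whereas coordinator-side inserts honor START WITH 10 and produce 10, 20, ..., 110. A sketch of the arithmetic:
-- slice start and observed step, from the output above
SELECT (14::bigint << 48) + 1 AS worker_slice_start,
       3940649673949195 - 3940649673949185 AS observed_increment;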
-- alter table .. alter column .. add is unsupported
ALTER TABLE partitioned_table ALTER COLUMN g ADD GENERATED ALWAYS AS IDENTITY;
ERROR: alter table command is currently unsupported
DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
-- alter table .. alter column is unsupported
ALTER TABLE partitioned_table ALTER COLUMN b TYPE int;
ERROR: cannot execute ALTER COLUMN command involving identity column
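Both errors above are Citus restrictions; vanilla PostgreSQL accepts these subcommands on a plain local table. A throwaway sketch with a hypothetical table, not part of the test:
-- works on a table with no Citus metadata
CREATE TABLE plain_identity (a bigint GENERATED ALWAYS AS IDENTITY, b int NOT NULL);
ALTER TABLE plain_identity ALTER COLUMN b ADD GENERATED ALWAYS AS IDENTITY;
ALTER TABLE plain_identity ALTER COLUMN a TYPE int;
DROP TABLE plain_identity;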
DROP TABLE partitioned_table;
-- create a table for reference table testing.
CREATE TABLE reference_table (
a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10) UNIQUE,
c int
);
SELECT create_reference_table('reference_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
\d reference_table
Table "generated_identities.reference_table"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | bigint | | not null | generated always as identity
c | integer | | |
Indexes:
"reference_table_b_key" UNIQUE CONSTRAINT, btree (b)
\c - - - :worker_1_port
SET search_path TO generated_identities;
\d generated_identities.reference_table
Table "generated_identities.reference_table"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | bigint | | not null | generated always as identity
c | integer | | |
Indexes:
"reference_table_b_key" UNIQUE CONSTRAINT, btree (b)
INSERT INTO reference_table (c)
SELECT s FROM generate_series(1,10) s;
--on master
select * from reference_table;
a | b | c
---------------------------------------------------------------------
3940649673949185 | 3940649673949185 | 1
3940649673949195 | 3940649673949195 | 2
3940649673949205 | 3940649673949205 | 3
3940649673949215 | 3940649673949215 | 4
3940649673949225 | 3940649673949225 | 5
3940649673949235 | 3940649673949235 | 6
3940649673949245 | 3940649673949245 | 7
3940649673949255 | 3940649673949255 | 8
3940649673949265 | 3940649673949265 | 9
3940649673949275 | 3940649673949275 | 10
(10 rows)
\c - - - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO reference_table (c)
SELECT s FROM generate_series(11,20) s;
SELECT * FROM reference_table ORDER BY c ASC;
a | b | c
---------------------------------------------------------------------
3940649673949185 | 3940649673949185 | 1
3940649673949195 | 3940649673949195 | 2
3940649673949205 | 3940649673949205 | 3
3940649673949215 | 3940649673949215 | 4
3940649673949225 | 3940649673949225 | 5
3940649673949235 | 3940649673949235 | 6
3940649673949245 | 3940649673949245 | 7
3940649673949255 | 3940649673949255 | 8
3940649673949265 | 3940649673949265 | 9
3940649673949275 | 3940649673949275 | 10
10 | 10 | 11
20 | 20 | 12
30 | 30 | 13
40 | 40 | 14
50 | 50 | 15
60 | 60 | 16
70 | 70 | 17
80 | 80 | 18
90 | 90 | 19
100 | 100 | 20
(20 rows)
DROP TABLE reference_table;
CREATE TABLE color (
color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
color_name VARCHAR NOT NULL
);
-- https://github.com/citusdata/citus/issues/6694
CREATE USER identity_test_user;
GRANT INSERT ON color TO identity_test_user;
GRANT USAGE ON SCHEMA generated_identities TO identity_test_user;
SET ROLE identity_test_user;
SELECT create_distributed_table('color', 'color_id');
ERROR: must be owner of table color
SET ROLE postgres;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table_concurrently('color', 'color_id');
create_distributed_table_concurrently
---------------------------------------------------------------------
(1 row)
RESET citus.shard_replication_factor;
\c - identity_test_user - :worker_1_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO color(color_name) VALUES ('Blue');
\c - postgres - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
SET citus.next_shard_id TO 12400000;
DROP TABLE Color;
CREATE TABLE color (
color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
color_name VARCHAR NOT NULL
) USING columnar;
SELECT create_distributed_table('color', 'color_id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO color(color_name) VALUES ('Blue');
\d+ color
Table "generated_identities.color"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
color_id | bigint | | not null | generated always as identity | plain | |
color_name | character varying | | not null | | extended | |
Indexes:
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)
\c - - - :worker_1_port
SET search_path TO generated_identities;
\d+ color
Table "generated_identities.color"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
color_id | bigint | | not null | generated always as identity | plain | |
color_name | character varying | | not null | | extended | |
Indexes:
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)
INSERT INTO color(color_name) VALUES ('Red');
-- alter sequence .. restart
ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
ERROR: Altering a distributed sequence is currently not supported.
-- override system value
INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000"
DETAIL: Key (color_id)=(1) already exists.
CONTEXT: while executing command on localhost:xxxxx
-- update null or custom value
UPDATE color SET color_id = NULL;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
UPDATE color SET color_id = 1;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
\c - postgres - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
-- alter table .. add column .. GENERATED .. AS IDENTITY
ALTER TABLE color ADD COLUMN color_id BIGINT GENERATED ALWAYS AS IDENTITY;
ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers
-- alter sequence .. restart
ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
ERROR: Altering a distributed sequence is currently not supported.
-- override system value
INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000"
DETAIL: Key (color_id)=(1) already exists.
CONTEXT: while executing command on localhost:xxxxx
-- update null or custom value
UPDATE color SET color_id = NULL;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
UPDATE color SET color_id = 1;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
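As the repeated errors show, the only supported escape hatch for writing an explicit value into a GENERATED ALWAYS identity column is per-INSERT, and it is plain PostgreSQL behavior rather than anything Citus-specific; UPDATE has no equivalent clause. A sketch (the value 1000 is arbitrary and assumed not to collide with the unique index):
-- hypothetical insert, not part of the original test
INSERT INTO color (color_id, color_name)
OVERRIDING SYSTEM VALUE
VALUES (1000, 'Green');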
DROP TABLE IF EXISTS test;
CREATE TABLE test (x int, y int, z bigint generated by default as identity);
SELECT create_distributed_table('test', 'x', colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO test VALUES (1,2);
INSERT INTO test SELECT x, y FROM test WHERE x = 1;
SELECT * FROM test;
x | y | z
---------------------------------------------------------------------
1 | 2 | 1
1 | 2 | 2
(2 rows)
DROP SCHEMA generated_identities CASCADE;
DROP USER identity_test_user;


@ -1,7 +1,7 @@
--
-- GRANT_ON_SCHEMA_PROPAGATION
--
--- this test has different output for PG13/14 compared to PG15
+-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset
@ -327,6 +327,8 @@ SELECT master_remove_node('localhost', :worker_2_port);
(1 row)
+-- to avoid different output in PG15
+GRANT CREATE ON SCHEMA public TO public;
-- distribute the public schema (it has to be distributed by now but just in case)
CREATE TABLE public_schema_table (id INT);
SELECT create_distributed_table('public_schema_table', 'id');


@ -1,7 +1,7 @@
--
-- GRANT_ON_SCHEMA_PROPAGATION
--
--- this test has different output for PG13/14 compared to PG15
+-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset
@ -327,6 +327,8 @@ SELECT master_remove_node('localhost', :worker_2_port);
(1 row)
+-- to avoid different output in PG15
+GRANT CREATE ON SCHEMA public TO public;
-- distribute the public schema (it has to be distributed by now but just in case)
CREATE TABLE public_schema_table (id INT);
SELECT create_distributed_table('public_schema_table', 'id');
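The added GRANT exists because PG15 stopped granting CREATE on the public schema to PUBLIC (the commit referenced in the header); restoring the pre-PG15 ACL keeps the test output identical across versions. One way to inspect the effective ACL, as a side check rather than part of the test:
-- shows who holds which privileges on schema public
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public';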


@ -1,66 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1:
-- update a specific node by address
SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
<waiting ...>
step s1-abort: ABORT;
step s2-update-node-1: <... completed>
master_update_node
---------------------------------------------------------------------
(1 row)
step s2-abort: ABORT;
master_remove_node
---------------------------------------------------------------------
(2 rows)
starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1-force:
-- update a specific node by address (force)
SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
<waiting ...>
step s2-update-node-1-force: <... completed>
master_update_node
---------------------------------------------------------------------
(1 row)
step s2-abort: ABORT;
step s1-abort: ABORT;
FATAL: terminating connection due to administrator command
server closed the connection unexpectedly
master_remove_node
---------------------------------------------------------------------
(2 rows)


@ -1200,7 +1200,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1209,7 +1209,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1218,7 +1218,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1227,7 +1227,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1236,7 +1236,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1245,7 +1245,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1254,7 +1254,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1263,7 +1263,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
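The btrim churn in these hunks is purely a deparsing difference: from PG14 on, ruleutils prints trim-family functions in SQL-standard syntax, so with the PG13 outputs gone the locally executed command reads TRIM(BOTH FROM value) instead of btrim(value). The two spellings call the same function, as a one-line sketch confirms:
-- side check, not part of the test; returns t
SELECT btrim('  12  ') = TRIM(BOTH FROM '  12  ') AS equivalent;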


@ -1200,7 +1200,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1209,7 +1209,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1218,7 +1218,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1227,7 +1227,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1236,7 +1236,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1245,7 +1245,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1254,7 +1254,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1263,7 +1263,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12


@ -1187,7 +1187,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1196,7 +1196,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1205,7 +1205,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1214,7 +1214,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1223,7 +1223,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1232,7 +1232,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1241,7 +1241,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1250,7 +1250,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

View File

@ -1187,7 +1187,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1196,7 +1196,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1205,7 +1205,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1214,7 +1214,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1223,7 +1223,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1232,7 +1232,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1241,7 +1241,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1250,7 +1250,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
-NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
+NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

View File

@ -538,6 +538,7 @@ CREATE POLICY fp_s ON information FOR SELECT
-- this attempt for distribution fails because the table has a disallowed expression
SELECT create_distributed_table('information', 'group_id');
ERROR: cannot create policy
+DETAIL: Subqueries are not supported in policies on distributed tables
-- DROP the expression so we can distribute the table
DROP POLICY fp_s ON information;
SELECT create_distributed_table('information', 'group_id');
@ -549,7 +550,7 @@ SELECT create_distributed_table('information', 'group_id');
-- Try and create the expression on a distributed table, this should also fail
CREATE POLICY fp_s ON information FOR SELECT
USING (group_id <= (SELECT group_id FROM users WHERE user_name = current_user));
-ERROR: cannot create policy
+ERROR: unexpected non-SELECT command in SubLink
-- Clean up test
DROP TABLE information, groups, users;
SET citus.next_shard_id TO 1810000;
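With the PG13 alternative output gone, the expected errors here are the PG14+ ones: distributing a table whose policy references a subquery now carries an explicit DETAIL line, and re-creating that policy against the already-distributed table fails while processing the SubLink. A condensed sketch of the failing statements from this test (schemas abridged):

-- A row-level security policy containing a subquery blocks distribution:
CREATE POLICY fp_s ON information FOR SELECT
    USING (group_id <= (SELECT group_id FROM users
                        WHERE user_name = current_user));
SELECT create_distributed_table('information', 'group_id');
-- ERROR:  cannot create policy
-- DETAIL:  Subqueries are not supported in policies on distributed tables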

View File

@ -98,19 +98,24 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Plan": {
"Node Type": "Sort",
"Parallel Aware": false,
+"Async Capable": false,
"Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Aggregate",
"Strategy": "Hashed",
"Partial Mode": "Simple",
+"Parent Relationship": "Outer",
"Parallel Aware": false,
+"Async Capable": false,
"Group Key": ["remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Custom Scan",
+"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
+"Async Capable": false,
"Distributed Query": {
"Job": {
"Task Count": 2,
@ -126,11 +131,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parallel Aware": false,
+"Async Capable": false,
"Group Key": ["l_quantity"],
"Plans": [
{
"Node Type": "Seq Scan",
+"Parent Relationship": "Outer",
"Parallel Aware": false,
+"Async Capable": false,
"Relation Name": "lineitem_360000",
"Alias": "lineitem"
}
@ -172,6 +180,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Plan>
<Node-Type>Sort</Node-Type>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Sort-Key>
<Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item>
<Item>remote_scan.l_quantity</Item>
@ -181,15 +190,19 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Node-Type>Aggregate</Node-Type>
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
+<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Group-Key>
<Item>remote_scan.l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
+<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Distributed-Query>
<Job>
<Task-Count>2</Task-Count>
@ -205,13 +218,16 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Group-Key>
<Item>l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
+<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Relation-Name>lineitem_360000</Relation-Name>
<Alias>lineitem</Alias>
</Plan>
@ -250,6 +266,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Plan:
Node Type: "Sort"
Parallel Aware: false
+Async Capable: false
Sort Key:
- "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))"
- "remote_scan.l_quantity"
@ -257,13 +274,17 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Node Type: "Aggregate"
Strategy: "Hashed"
Partial Mode: "Simple"
+Parent Relationship: "Outer"
Parallel Aware: false
+Async Capable: false
Group Key:
- "remote_scan.l_quantity"
Plans:
- Node Type: "Custom Scan"
+Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false
+Async Capable: false
Distributed Query:
Job:
Task Count: 2
@ -276,11 +297,14 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Hashed"
Partial Mode: "Simple"
Parallel Aware: false
+Async Capable: false
Group Key:
- "l_quantity"
Plans:
- Node Type: "Seq Scan"
+Parent Relationship: "Outer"
Parallel Aware: false
+Async Capable: false
Relation Name: "lineitem_360000"
Alias: "lineitem"
@ -1135,11 +1159,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Plain",
"Partial Mode": "Simple",
"Parallel Aware": false,
+"Async Capable": false,
"Plans": [
{
"Node Type": "Custom Scan",
+"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
+"Async Capable": false,
"Distributed Query": {
"Job": {
"Task Count": 6,
@ -1191,11 +1218,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Plain</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
+<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Distributed-Query>
<Job>
<Task-Count>6</Task-Count>
@ -1258,10 +1288,13 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Plain"
Partial Mode: "Simple"
Parallel Aware: false
+Async Capable: false
Plans:
- Node Type: "Custom Scan"
+Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false
+Async Capable: false
Distributed Query:
Job:
Task Count: 6
@ -1684,6 +1717,7 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
"Plan": { +
"Node Type": "Result", +
"Parallel Aware": false,+
+"Async Capable": false, +
"Actual Rows": 1, +
"Actual Loops": 1 +
}, +
@ -1707,6 +1741,7 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
<Plan> +
<Node-Type>Result</Node-Type> +
<Parallel-Aware>false</Parallel-Aware> +
+<Async-Capable>false</Async-Capable> +
<Actual-Rows>1</Actual-Rows> +
<Actual-Loops>1</Actual-Loops> +
</Plan> +
@ -1728,6 +1763,7 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
- Plan: +
Node Type: "Result" +
Parallel Aware: false+
+Async Capable: false +
Actual Rows: 1 +
Actual Loops: 1 +
Triggers:
@ -2115,6 +2151,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT
"Node Type": "Custom Scan",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
+"Async Capable": false,
"Actual Rows": 0,
"Actual Loops": 1,
"Distributed Query": {
@ -2131,6 +2168,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT
"Node Type": "ModifyTable",
"Operation": "Insert",
"Parallel Aware": false,
+"Async Capable": false,
"Relation Name": "explain_pk_570013",
"Alias": "citus_table_alias",
"Actual Rows": 0,
@ -2138,7 +2176,9 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT
"Plans": [
{
"Node Type": "Result",
+"Parent Relationship": "Outer",
"Parallel Aware": false,
+"Async Capable": false,
"Actual Rows": 1,
"Actual Loops": 1
}
@ -2167,6 +2207,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F
"Node Type": "Custom Scan",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
+"Async Capable": false,
"Actual Rows": 0,
"Actual Loops": 1,
"Distributed Query": {
@ -2184,6 +2225,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F
"Plan": {
"Node Type": "Seq Scan",
"Parallel Aware": false,
+"Async Capable": false,
"Relation Name": "explain_pk_570013",
"Alias": "explain_pk",
"Actual Rows": 0,
@ -2212,6 +2254,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO
<Node-Type>Custom Scan</Node-Type>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Actual-Rows>0</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
<Distributed-Query>
@ -2228,6 +2271,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO
<Node-Type>ModifyTable</Node-Type>
<Operation>Insert</Operation>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Relation-Name>explain_pk_570013</Relation-Name>
<Alias>citus_table_alias</Alias>
<Actual-Rows>0</Actual-Rows>
@ -2235,7 +2279,9 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO
<Plans>
<Plan>
<Node-Type>Result</Node-Type>
+<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Actual-Rows>1</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
</Plan>
@ -2263,6 +2309,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FR
<Node-Type>Custom Scan</Node-Type>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Actual-Rows>0</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
<Distributed-Query>
@ -2280,6 +2327,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FR
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Relation-Name>explain_pk_570013</Relation-Name>
<Alias>explain_pk</Alias>
<Actual-Rows>0</Actual-Rows>
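The EXPLAIN churn above is mechanical: "Async Capable" is a field PG14 added to the structured EXPLAIN formats, and fields like "Parent Relationship" previously had to be scrubbed by normalize rules so PG13 and PG14 runs could share one expected file; with PG13 gone, the expected outputs carry them verbatim. Any stock PG14+ server shows the new field, no Citus required (output abridged in the comment):

-- Structured EXPLAIN on PG14+ reports Async Capable for every node:
EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT 1;
-- - Plan:
--     Node Type: "Result"
--     Parallel Aware: false
--     Async Capable: false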

View File

@ -1,7 +1,7 @@
--
-- MULTI_METADATA_SYNC
--
--- this test has different output for PG13/14 compared to PG15
+-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset

View File

@ -1,7 +1,7 @@
--
-- MULTI_METADATA_SYNC
--
--- this test has different output for PG13/14 compared to PG15
+-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset
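Only the header comment changes in these two files: with PG13 support dropped, the pre-PG15 variant of this output is the PG14 one. The SHOW line feeds the usual psql version gate the test uses to branch on major version; the pattern is roughly the following (variable name assumed, following the convention used elsewhere in the suite):

SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
-- the test then branches with \if :server_version_ge_15 ... \endif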

View File

@ -85,19 +85,24 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Plan": {
"Node Type": "Sort",
"Parallel Aware": false,
+"Async Capable": false,
"Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Aggregate",
"Strategy": "Hashed",
"Partial Mode": "Simple",
+"Parent Relationship": "Outer",
"Parallel Aware": false,
+"Async Capable": false,
"Group Key": ["remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Custom Scan",
+"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
+"Async Capable": false,
"Distributed Query": {
"Job": {
"Task Count": 16,
@ -113,11 +118,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parallel Aware": false,
+"Async Capable": false,
"Group Key": ["l_quantity"],
"Plans": [
{
"Node Type": "Seq Scan",
+"Parent Relationship": "Outer",
"Parallel Aware": false,
+"Async Capable": false,
"Relation Name": "lineitem_mx_1220052",
"Alias": "lineitem_mx"
}
@ -153,6 +161,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Plan>
<Node-Type>Sort</Node-Type>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Sort-Key>
<Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item>
<Item>remote_scan.l_quantity</Item>
@ -162,15 +171,19 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Node-Type>Aggregate</Node-Type>
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
+<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Group-Key>
<Item>remote_scan.l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
+<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Distributed-Query>
<Job>
<Task-Count>16</Task-Count>
@ -186,13 +199,16 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Group-Key>
<Item>l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
+<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
+<Async-Capable>false</Async-Capable>
<Relation-Name>lineitem_mx_1220052</Relation-Name>
<Alias>lineitem_mx</Alias>
</Plan>
@ -224,6 +240,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Plan: - Plan:
Node Type: "Sort" Node Type: "Sort"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Sort Key: Sort Key:
- "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))" - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))"
- "remote_scan.l_quantity" - "remote_scan.l_quantity"
@ -231,13 +248,17 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Node Type: "Aggregate" - Node Type: "Aggregate"
Strategy: "Hashed" Strategy: "Hashed"
Partial Mode: "Simple" Partial Mode: "Simple"
Parent Relationship: "Outer"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Group Key: Group Key:
- "remote_scan.l_quantity" - "remote_scan.l_quantity"
Plans: Plans:
- Node Type: "Custom Scan" - Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive" Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Distributed Query: Distributed Query:
Job: Job:
Task Count: 16 Task Count: 16
@ -250,11 +271,14 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Hashed" Strategy: "Hashed"
Partial Mode: "Simple" Partial Mode: "Simple"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Group Key: Group Key:
- "l_quantity" - "l_quantity"
Plans: Plans:
- Node Type: "Seq Scan" - Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Relation Name: "lineitem_mx_1220052" Relation Name: "lineitem_mx_1220052"
Alias: "lineitem_mx" Alias: "lineitem_mx"
@ -528,11 +552,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Plain", "Strategy": "Plain",
"Partial Mode": "Simple", "Partial Mode": "Simple",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Custom Scan", "Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive", "Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Distributed Query": { "Distributed Query": {
"Job": { "Job": {
"Task Count": 16, "Task Count": 16,
@ -548,34 +575,45 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Plain", "Strategy": "Plain",
"Partial Mode": "Simple", "Partial Mode": "Simple",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Hash Join", "Node Type": "Hash Join",
"Parent Relationship": "Outer",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Join Type": "Inner", "Join Type": "Inner",
"Inner Unique": false, "Inner Unique": false,
"Hash Cond": "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)", "Hash Cond": "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)",
"Plans": [ "Plans": [
{ {
"Node Type": "Hash Join", "Node Type": "Hash Join",
"Parent Relationship": "Outer",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Join Type": "Inner", "Join Type": "Inner",
"Inner Unique": false, "Inner Unique": false,
"Hash Cond": "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)", "Hash Cond": "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)",
"Plans": [ "Plans": [
{ {
"Node Type": "Seq Scan", "Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Relation Name": "supplier_mx_1220087", "Relation Name": "supplier_mx_1220087",
"Alias": "supplier_mx" "Alias": "supplier_mx"
}, },
{ {
"Node Type": "Hash", "Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Seq Scan", "Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Relation Name": "lineitem_mx_1220052", "Relation Name": "lineitem_mx_1220052",
"Alias": "lineitem_mx" "Alias": "lineitem_mx"
} }
@ -585,28 +623,38 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
}, },
{ {
"Node Type": "Hash", "Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Hash Join", "Node Type": "Hash Join",
"Parent Relationship": "Outer",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Join Type": "Inner", "Join Type": "Inner",
"Inner Unique": false, "Inner Unique": false,
"Hash Cond": "(customer_mx.c_custkey = orders_mx.o_custkey)", "Hash Cond": "(customer_mx.c_custkey = orders_mx.o_custkey)",
"Plans": [ "Plans": [
{ {
"Node Type": "Seq Scan", "Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Relation Name": "customer_mx_1220084", "Relation Name": "customer_mx_1220084",
"Alias": "customer_mx" "Alias": "customer_mx"
}, },
{ {
"Node Type": "Hash", "Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Plans": [ "Plans": [
{ {
"Node Type": "Seq Scan", "Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false, "Parallel Aware": false,
"Async Capable": false,
"Relation Name": "orders_mx_1220068", "Relation Name": "orders_mx_1220068",
"Alias": "orders_mx" "Alias": "orders_mx"
} }
@ -653,11 +701,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Plain</Strategy> <Strategy>Plain</Strategy>
<Partial-Mode>Simple</Partial-Mode> <Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans> <Plans>
<Plan> <Plan>
<Node-Type>Custom Scan</Node-Type> <Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider> <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Distributed-Query> <Distributed-Query>
<Job> <Job>
<Task-Count>16</Task-Count> <Task-Count>16</Task-Count>
@ -673,34 +724,45 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Plain</Strategy> <Strategy>Plain</Strategy>
<Partial-Mode>Simple</Partial-Mode> <Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans> <Plans>
<Plan> <Plan>
<Node-Type>Hash Join</Node-Type> <Node-Type>Hash Join</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Join-Type>Inner</Join-Type> <Join-Type>Inner</Join-Type>
<Inner-Unique>false</Inner-Unique> <Inner-Unique>false</Inner-Unique>
<Hash-Cond>(lineitem_mx.l_orderkey = orders_mx.o_orderkey)</Hash-Cond> <Hash-Cond>(lineitem_mx.l_orderkey = orders_mx.o_orderkey)</Hash-Cond>
<Plans> <Plans>
<Plan> <Plan>
<Node-Type>Hash Join</Node-Type> <Node-Type>Hash Join</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Join-Type>Inner</Join-Type> <Join-Type>Inner</Join-Type>
<Inner-Unique>false</Inner-Unique> <Inner-Unique>false</Inner-Unique>
<Hash-Cond>(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)</Hash-Cond> <Hash-Cond>(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)</Hash-Cond>
<Plans> <Plans>
<Plan> <Plan>
<Node-Type>Seq Scan</Node-Type> <Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>supplier_mx_1220087</Relation-Name> <Relation-Name>supplier_mx_1220087</Relation-Name>
<Alias>supplier_mx</Alias> <Alias>supplier_mx</Alias>
</Plan> </Plan>
<Plan> <Plan>
<Node-Type>Hash</Node-Type> <Node-Type>Hash</Node-Type>
<Parent-Relationship>Inner</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans> <Plans>
<Plan> <Plan>
<Node-Type>Seq Scan</Node-Type> <Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>lineitem_mx_1220052</Relation-Name> <Relation-Name>lineitem_mx_1220052</Relation-Name>
<Alias>lineitem_mx</Alias> <Alias>lineitem_mx</Alias>
</Plan> </Plan>
@ -710,28 +772,38 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
</Plan> </Plan>
<Plan> <Plan>
<Node-Type>Hash</Node-Type> <Node-Type>Hash</Node-Type>
<Parent-Relationship>Inner</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans> <Plans>
<Plan> <Plan>
<Node-Type>Hash Join</Node-Type> <Node-Type>Hash Join</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Join-Type>Inner</Join-Type> <Join-Type>Inner</Join-Type>
<Inner-Unique>false</Inner-Unique> <Inner-Unique>false</Inner-Unique>
<Hash-Cond>(customer_mx.c_custkey = orders_mx.o_custkey)</Hash-Cond> <Hash-Cond>(customer_mx.c_custkey = orders_mx.o_custkey)</Hash-Cond>
<Plans> <Plans>
<Plan> <Plan>
<Node-Type>Seq Scan</Node-Type> <Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>customer_mx_1220084</Relation-Name> <Relation-Name>customer_mx_1220084</Relation-Name>
<Alias>customer_mx</Alias> <Alias>customer_mx</Alias>
</Plan> </Plan>
<Plan> <Plan>
<Node-Type>Hash</Node-Type> <Node-Type>Hash</Node-Type>
<Parent-Relationship>Inner</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans> <Plans>
<Plan> <Plan>
<Node-Type>Seq Scan</Node-Type> <Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>orders_mx_1220068</Relation-Name> <Relation-Name>orders_mx_1220068</Relation-Name>
<Alias>orders_mx</Alias> <Alias>orders_mx</Alias>
</Plan> </Plan>
@ -775,10 +847,13 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Plain" Strategy: "Plain"
Partial Mode: "Simple" Partial Mode: "Simple"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Plans: Plans:
- Node Type: "Custom Scan" - Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive" Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Distributed Query: Distributed Query:
Job: Job:
Task Count: 16 Task Count: 16
@ -791,48 +866,69 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Plain" Strategy: "Plain"
Partial Mode: "Simple" Partial Mode: "Simple"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Plans: Plans:
- Node Type: "Hash Join" - Node Type: "Hash Join"
Parent Relationship: "Outer"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Join Type: "Inner" Join Type: "Inner"
Inner Unique: false Inner Unique: false
Hash Cond: "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)" Hash Cond: "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)"
Plans: Plans:
- Node Type: "Hash Join" - Node Type: "Hash Join"
Parent Relationship: "Outer"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Join Type: "Inner" Join Type: "Inner"
Inner Unique: false Inner Unique: false
Hash Cond: "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)" Hash Cond: "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)"
Plans: Plans:
- Node Type: "Seq Scan" - Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Relation Name: "supplier_mx_1220087" Relation Name: "supplier_mx_1220087"
Alias: "supplier_mx" Alias: "supplier_mx"
- Node Type: "Hash" - Node Type: "Hash"
Parent Relationship: "Inner"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Plans: Plans:
- Node Type: "Seq Scan" - Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Relation Name: "lineitem_mx_1220052" Relation Name: "lineitem_mx_1220052"
Alias: "lineitem_mx" Alias: "lineitem_mx"
- Node Type: "Hash" - Node Type: "Hash"
Parent Relationship: "Inner"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Plans: Plans:
- Node Type: "Hash Join" - Node Type: "Hash Join"
Parent Relationship: "Outer"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Join Type: "Inner" Join Type: "Inner"
Inner Unique: false Inner Unique: false
Hash Cond: "(customer_mx.c_custkey = orders_mx.o_custkey)" Hash Cond: "(customer_mx.c_custkey = orders_mx.o_custkey)"
Plans: Plans:
- Node Type: "Seq Scan" - Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Relation Name: "customer_mx_1220084" Relation Name: "customer_mx_1220084"
Alias: "customer_mx" Alias: "customer_mx"
- Node Type: "Hash" - Node Type: "Hash"
Parent Relationship: "Inner"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Plans: Plans:
- Node Type: "Seq Scan" - Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false Parallel Aware: false
Async Capable: false
Relation Name: "orders_mx_1220068" Relation Name: "orders_mx_1220068"
Alias: "orders_mx" Alias: "orders_mx"

View File

@ -1,10 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
\else
\q
\endif
create schema pg14; create schema pg14;
set search_path to pg14; set search_path to pg14;
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
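The removed preamble is psql's conditional meta-command idiom: capture a boolean with \gset, then \q(uit) the whole script when the server is too old. Since PG13 can no longer appear, the guard was dead code. For reference, the same pattern gating a hypothetical PG15-only test would read (a sketch; the version number is only an example):

    SHOW server_version \gset
    SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
    \gset
    \if :server_version_ge_15
    \else
    \q
    \endif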

View File

@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
\else
\q

View File

@ -37,7 +37,7 @@ CREATE PROCEDURE test_procedure_commit(tt_id int, tt_org_id int) LANGUAGE SQL AS
COMMIT; COMMIT;
$$; $$;
CALL test_procedure_commit(2,5); CALL test_procedure_commit(2,5);
ERROR: COMMIT is not allowed in a SQL function ERROR: COMMIT is not allowed in an SQL function
CONTEXT: SQL function "test_procedure_commit" during startup CONTEXT: SQL function "test_procedure_commit" during startup
SELECT * FROM test_table ORDER BY 1, 2; SELECT * FROM test_table ORDER BY 1, 2;
id | org_id id | org_id
@ -52,7 +52,7 @@ CREATE PROCEDURE test_procedure_rollback(tt_id int, tt_org_id int) LANGUAGE SQL
COMMIT; COMMIT;
$$; $$;
CALL test_procedure_rollback(2,15); CALL test_procedure_rollback(2,15);
ERROR: ROLLBACK is not allowed in a SQL function ERROR: ROLLBACK is not allowed in an SQL function
CONTEXT: SQL function "test_procedure_rollback" during startup CONTEXT: SQL function "test_procedure_rollback" during startup
SELECT * FROM test_table ORDER BY 1, 2; SELECT * FROM test_table ORDER BY 1, 2;
id | org_id id | org_id
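The expected error text changes because PG14 reworded these messages from "in a SQL function" to "in an SQL function" as part of a tree-wide wording cleanup. A minimal reproduction on PG14+ that mirrors the shape of the test above (illustrative; the procedure name is made up):

    CREATE PROCEDURE commit_proc() LANGUAGE SQL AS $$ COMMIT; $$;
    CALL commit_proc();
    -- ERROR:  COMMIT is not allowed in an SQL function
    -- CONTEXT:  SQL function "commit_proc" during startup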

View File

@ -2,12 +2,7 @@
-- stat_statements -- stat_statements
-- --
-- tests citus_stat_statements functionality -- tests citus_stat_statements functionality
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
SET compute_query_id = 'on'; SET compute_query_id = 'on';
\endif
-- check if pg_stat_statements is available -- check if pg_stat_statements is available
SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements'; SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements';
name name
@ -72,11 +67,7 @@ select query, calls from citus_stat_statements();
insert into test values($1) | 1 insert into test values($1) | 1
(1 row) (1 row)
\if :server_version_ge_14
SET compute_query_id = 'off'; SET compute_query_id = 'off';
\else
set citus.stat_statements_track = 'none';
\endif
-- for pg >= 14, since compute_query_id is off, this insert -- for pg >= 14, since compute_query_id is off, this insert
-- shouldn't be tracked -- shouldn't be tracked
-- for pg < 14, we disable it explicitly so that we don't need -- for pg < 14, we disable it explicitly so that we don't need
@ -88,11 +79,7 @@ select query, calls from citus_stat_statements();
insert into test values($1) | 1 insert into test values($1) | 1
(1 row) (1 row)
\if :server_version_ge_14
SET compute_query_id = 'on'; SET compute_query_id = 'on';
\else
RESET citus.stat_statements_track;
\endif
SELECT citus_stat_statements_reset(); SELECT citus_stat_statements_reset();
citus_stat_statements_reset citus_stat_statements_reset
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -646,6 +633,4 @@ CONTEXT: PL/pgSQL function citus_stat_statements() line XX at RAISE
-- drop created tables -- drop created tables
DROP TABLE stat_test_text, stat_test_bigint, stat_test_bigint_other, stat_test_reference; DROP TABLE stat_test_text, stat_test_bigint, stat_test_bigint_other, stat_test_reference;
DROP FUNCTION normalize_query_string(text); DROP FUNCTION normalize_query_string(text);
\if :server_version_ge_14
SET compute_query_id = 'off'; SET compute_query_id = 'off';
\endif
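The dropped branches were the PG13 fallback: before compute_query_id existed, the test silenced tracking through citus.stat_statements_track instead. On PG14+ the in-core switch suffices, because pg_stat_statements (and therefore citus_stat_statements) only records statements that carry a query ID. A sketch of the flow the test now relies on (illustrative; the test table comes from the surrounding file):

    SET compute_query_id = 'on';   -- statements get query IDs and are tracked
    insert into test values(1);
    SET compute_query_id = 'off';  -- no query ID is computed, so not tracked
    insert into test values(1);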

View File

@ -114,7 +114,7 @@ delete from test_ref;
ERROR: fake_tuple_delete not implemented ERROR: fake_tuple_delete not implemented
CONTEXT: while executing command on localhost:xxxxx CONTEXT: while executing command on localhost:xxxxx
update test_ref set a=2; update test_ref set a=2;
ERROR: fake_tuple_update not implemented ERROR: fake_fetch_row_version not implemented
CONTEXT: while executing command on localhost:xxxxx CONTEXT: while executing command on localhost:xxxxx
RESET client_min_messages; RESET client_min_messages;
-- ddl events should include "USING fake_am" -- ddl events should include "USING fake_am"

View File

@ -3,8 +3,6 @@
-- =================================================================== -- ===================================================================
-- test top level window functions that are pushdownable -- test top level window functions that are pushdownable
-- =================================================================== -- ===================================================================
-- This test file has an alternative output because of use of
-- incremental sort in some explain outputs in PG13
-- --
-- a very simple window function with an aggregate and a window function -- a very simple window function with an aggregate and a window function
-- distribution column is on the partition by clause -- distribution column is on the partition by clause

File diff suppressed because it is too large

View File

@ -1,7 +1,6 @@
// Three alternative test outputs: // Two alternative test outputs:
// isolation_master_update_node.out for PG15 // isolation_master_update_node.out for PG15
// isolation_master_update_node_0.out for PG14 // isolation_master_update_node_0.out for PG14
// isolation_master_update_node_1.out for PG13
setup setup
{ {

View File

@ -63,9 +63,6 @@ SET search_path TO cpu_priority;
-- in their CREATE SUBSCRIPTION commands. -- in their CREATE SUBSCRIPTION commands.
SET citus.log_remote_commands TO ON; SET citus.log_remote_commands TO ON;
SET citus.grep_remote_commands = '%CREATE SUBSCRIPTION%'; SET citus.grep_remote_commands = '%CREATE SUBSCRIPTION%';
-- We disable binary protocol, so we have consistent output between PG13 and
-- PG14, because PG13 doesn't support binary logical replication.
SET citus.enable_binary_protocol = false;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
SET citus.cpu_priority_for_logical_replication_senders = 15; SET citus.cpu_priority_for_logical_replication_senders = 15;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical'); SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical');
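The removed SET existed only to paper over a PG13 gap: PG13 cannot replicate in binary format, so the test forced the text format for identical output on both versions. On PG14+ shard moves run with whatever the GUC is set to; checking the effective value is a one-liner (illustrative):

    SHOW citus.enable_binary_protocol;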

View File

@ -1,7 +1,3 @@
-- This test file has an alternative output because of error messages vary for PG13
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13;
CREATE SCHEMA generated_identities; CREATE SCHEMA generated_identities;
SET search_path TO generated_identities; SET search_path TO generated_identities;
SET client_min_messages to ERROR; SET client_min_messages to ERROR;

View File

@ -1,7 +1,7 @@
-- --
-- GRANT_ON_SCHEMA_PROPAGATION -- GRANT_ON_SCHEMA_PROPAGATION
-- --
-- this test has different output for PG13/14 compared to PG15 -- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role -- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 -- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset SHOW server_version \gset
@ -189,6 +189,9 @@ DROP SCHEMA dist_schema CASCADE;
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
SELECT master_remove_node('localhost', :worker_2_port); SELECT master_remove_node('localhost', :worker_2_port);
-- to avoid different output in PG15
GRANT CREATE ON SCHEMA public TO public;
-- distribute the public schema (it has to be distributed by now but just in case) -- distribute the public schema (it has to be distributed by now but just in case)
CREATE TABLE public_schema_table (id INT); CREATE TABLE public_schema_table (id INT);
SELECT create_distributed_table('public_schema_table', 'id'); SELECT create_distributed_table('public_schema_table', 'id');
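The two added lines counteract another PG15 change: new databases no longer grant CREATE on the public schema to PUBLIC, so without the explicit GRANT the privilege listings later in the test would differ between PG14 and PG15. A quick sanity check after the grant (a sketch, not part of the test):

    -- After the GRANT, the ACL contains an entry for PUBLIC (empty grantee)
    -- with C(REATE) on both versions, e.g. {postgres=UC/postgres,=UC/postgres}.
    SELECT nspacl FROM pg_namespace WHERE nspname = 'public';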

View File

@ -1,7 +1,7 @@
-- --
-- MULTI_METADATA_SYNC -- MULTI_METADATA_SYNC
-- --
-- this test has different output for PG13/14 compared to PG15 -- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role -- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 -- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset SHOW server_version \gset

View File

@ -1,11 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
\else
\q
\endif
create schema pg14; create schema pg14;
set search_path to pg14; set search_path to pg14;
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;

View File

@ -3,12 +3,7 @@
-- --
-- tests citus_stat_statements functionality -- tests citus_stat_statements functionality
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
SET compute_query_id = 'on'; SET compute_query_id = 'on';
\endif
-- check if pg_stat_statements is available -- check if pg_stat_statements is available
SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements'; SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements';
@ -50,11 +45,7 @@ SELECT create_distributed_table('test','a');
insert into test values(1); insert into test values(1);
select query, calls from citus_stat_statements(); select query, calls from citus_stat_statements();
\if :server_version_ge_14
SET compute_query_id = 'off'; SET compute_query_id = 'off';
\else
set citus.stat_statements_track = 'none';
\endif
-- for pg >= 14, since compute_query_id is off, this insert -- for pg >= 14, since compute_query_id is off, this insert
-- shouldn't be tracked -- shouldn't be tracked
@ -64,11 +55,7 @@ insert into test values(1);
select query, calls from citus_stat_statements(); select query, calls from citus_stat_statements();
\if :server_version_ge_14
SET compute_query_id = 'on'; SET compute_query_id = 'on';
\else
RESET citus.stat_statements_track;
\endif
SELECT citus_stat_statements_reset(); SELECT citus_stat_statements_reset();
@ -290,6 +277,4 @@ DROP TABLE stat_test_text, stat_test_bigint, stat_test_bigint_other, stat_test_reference;
DROP FUNCTION normalize_query_string(text); DROP FUNCTION normalize_query_string(text);
\if :server_version_ge_14
SET compute_query_id = 'off'; SET compute_query_id = 'off';
\endif

View File

@ -3,8 +3,6 @@
-- =================================================================== -- ===================================================================
-- test top level window functions that are pushdownable -- test top level window functions that are pushdownable
-- =================================================================== -- ===================================================================
-- This test file has an alternative output because of use of
-- incremental sort in some explain outputs in PG13
-- --
-- a very simple window function with an aggregate and a window function -- a very simple window function with an aggregate and a window function