mirror of https://github.com/citusdata/citus.git
Drop PG13 Support Phase 2 - Remove PG13 specific paths/tests (#7007)
This commit is the second and final phase of dropping PG13 support.
It consists of the following:
- Removes all PG_VERSION_13 & PG_VERSION_14 checks from code paths
- Removes pg_version_compat entries and columnar_version_compat entries
specific to PG13
- Removes alternative PG13 test outputs
- Removes PG13 normalize lines and fixes the test outputs accordingly
It is a continuation of 5bf163a27d
pull/6984/head
parent 1bb667ce6e
commit 69af3e8509
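For context: the `_compat` wrappers deleted throughout this diff follow one pattern. Citus keeps version shims in pg_version_compat.h (and a columnar counterpart) that hide signature differences between supported PostgreSQL majors. Below is a minimal, hypothetical sketch of such a shim; the macro name matches one removed in this commit, but the exact definition is illustrative, assuming the PG14 change that added a leading ResultRelInfo argument to ExecSimpleRelationInsert:

/* sketch of the compat pattern being removed; illustrative, not the verbatim header */
#if PG_VERSION_NUM >= PG_VERSION_14
#define ExecSimpleRelationInsert_compat(relInfo, estate, slot) \
	ExecSimpleRelationInsert(relInfo, estate, slot)
#else
/* PG13: no ResultRelInfo parameter; it was taken from the EState instead */
#define ExecSimpleRelationInsert_compat(relInfo, estate, slot) \
	ExecSimpleRelationInsert(estate, slot)
#endif

With PG13 gone, PG14 is the minimum supported version, every such guard is dead code, and call sites can use the PG14 signature directly, which is exactly what the hunks below do.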
@@ -159,5 +159,5 @@ MemoryContextTotals(MemoryContext context, MemoryContextCounters *counters)
 		MemoryContextTotals(child, counters);
 	}

-	context->methods->stats_compat(context, NULL, NULL, counters, true);
+	context->methods->stats(context, NULL, NULL, counters, true);
 }
@@ -1623,12 +1623,8 @@ StartModifyRelation(Relation rel)
 {
 	EState *estate = create_estate_for_relation(rel);

-#if PG_VERSION_NUM >= PG_VERSION_14
 	ResultRelInfo *resultRelInfo = makeNode(ResultRelInfo);
 	InitResultRelInfo(resultRelInfo, rel, 1, NULL, 0);
-#else
-	ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
-#endif

 	/* ExecSimpleRelationInsert, ... require caller to open indexes */
 	ExecOpenIndices(resultRelInfo, false);
@@ -1658,7 +1654,7 @@ InsertTupleAndEnforceConstraints(ModifyState *state, Datum *values, bool *nulls)
 	ExecStoreHeapTuple(tuple, slot, false);

 	/* use ExecSimpleRelationInsert to enforce constraints */
-	ExecSimpleRelationInsert_compat(state->resultRelInfo, state->estate, slot);
+	ExecSimpleRelationInsert(state->resultRelInfo, state->estate, slot);
 }
@@ -1689,12 +1685,8 @@ FinishModifyRelation(ModifyState *state)
 	ExecCloseIndices(state->resultRelInfo);

 	AfterTriggerEndQuery(state->estate);
-#if PG_VERSION_NUM >= PG_VERSION_14
 	ExecCloseResultRelations(state->estate);
 	ExecCloseRangeTableRelations(state->estate);
-#else
-	ExecCleanUpTriggerState(state->estate);
-#endif
 	ExecResetTupleTable(state->estate->es_tupleTable, false);
 	FreeExecutorState(state->estate);
@@ -1723,15 +1715,6 @@ create_estate_for_relation(Relation rel)
 	rte->rellockmode = AccessShareLock;
 	ExecInitRangeTable(estate, list_make1(rte));

-#if PG_VERSION_NUM < PG_VERSION_14
-	ResultRelInfo *resultRelInfo = makeNode(ResultRelInfo);
-	InitResultRelInfo(resultRelInfo, rel, 1, NULL, 0);
-
-	estate->es_result_relations = resultRelInfo;
-	estate->es_num_result_relations = 1;
-	estate->es_result_relation_info = resultRelInfo;
-#endif
-
 	estate->es_output_cid = GetCurrentCommandId(true);

 	/* Prepare to catch AFTER triggers. */
@@ -115,9 +115,7 @@ static RangeVar * ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt,
 										  List **columnarOptions);
 static void ColumnarProcessUtility(PlannedStmt *pstmt,
 								   const char *queryString,
-#if PG_VERSION_NUM >= PG_VERSION_14
 								   bool readOnlyTree,
-#endif
 								   ProcessUtilityContext context,
 								   ParamListInfo params,
 								   struct QueryEnvironment *queryEnv,
@@ -665,7 +663,6 @@ columnar_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
 }


-#if PG_VERSION_NUM >= PG_VERSION_14
 static TransactionId
 columnar_index_delete_tuples(Relation rel,
 							 TM_IndexDeleteOp *delstate)
@@ -714,19 +711,6 @@ columnar_index_delete_tuples(Relation rel,
 }


-#else
-static TransactionId
-columnar_compute_xid_horizon_for_tuples(Relation rel,
-										ItemPointerData *tids,
-										int nitems)
-{
-	elog(ERROR, "columnar_compute_xid_horizon_for_tuples not implemented");
-}
-
-
-#endif
-
-
 static void
 columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
 					  int options, BulkInsertState bistate)
@@ -1484,8 +1468,7 @@ columnar_index_build_range_scan(Relation columnarRelation,
 	if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent)
 	{
 		/* ignore lazy VACUUM's */
-		OldestXmin = GetOldestNonRemovableTransactionId_compat(columnarRelation,
-															   PROCARRAY_FLAGS_VACUUM);
+		OldestXmin = GetOldestNonRemovableTransactionId(columnarRelation);
 	}

 	Snapshot snapshot = { 0 };
@@ -1813,8 +1796,8 @@ ColumnarReadMissingRowsIntoIndex(TableScanDesc scan, Relation indexRelation,
 		Relation columnarRelation = scan->rs_rd;
 		IndexUniqueCheck indexUniqueCheck =
 			indexInfo->ii_Unique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO;
-		index_insert_compat(indexRelation, indexValues, indexNulls, columnarItemPointer,
-							columnarRelation, indexUniqueCheck, false, indexInfo);
+		index_insert(indexRelation, indexValues, indexNulls, columnarItemPointer,
+					 columnarRelation, indexUniqueCheck, false, indexInfo);

 		validateIndexState->tups_inserted += 1;
 	}
@@ -2240,21 +2223,17 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
 static void
 ColumnarProcessUtility(PlannedStmt *pstmt,
 					   const char *queryString,
-#if PG_VERSION_NUM >= PG_VERSION_14
 					   bool readOnlyTree,
-#endif
 					   ProcessUtilityContext context,
 					   ParamListInfo params,
 					   struct QueryEnvironment *queryEnv,
 					   DestReceiver *dest,
 					   QueryCompletion *completionTag)
 {
-#if PG_VERSION_NUM >= PG_VERSION_14
 	if (readOnlyTree)
 	{
 		pstmt = copyObject(pstmt);
 	}
-#endif

 	Node *parsetree = pstmt->utilityStmt;
@@ -2371,8 +2350,8 @@ ColumnarProcessUtility(PlannedStmt *pstmt,
 		CheckCitusColumnarAlterExtensionStmt(parsetree);
 	}

-	PrevProcessUtilityHook_compat(pstmt, queryString, false, context,
-								  params, queryEnv, dest, completionTag);
+	PrevProcessUtilityHook(pstmt, queryString, false, context,
+						   params, queryEnv, dest, completionTag);

 	if (columnarOptions != NIL)
 	{
@@ -2500,11 +2479,7 @@ static const TableAmRoutine columnar_am_methods = {
 	.tuple_get_latest_tid = columnar_get_latest_tid,
 	.tuple_tid_valid = columnar_tuple_tid_valid,
 	.tuple_satisfies_snapshot = columnar_tuple_satisfies_snapshot,
-#if PG_VERSION_NUM >= PG_VERSION_14
 	.index_delete_tuples = columnar_index_delete_tuples,
-#else
-	.compute_xid_horizon_for_tuples = columnar_compute_xid_horizon_for_tuples,
-#endif

 	.tuple_insert = columnar_tuple_insert,
 	.tuple_insert_speculative = columnar_tuple_insert_speculative,
@@ -81,13 +81,6 @@ CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig)
 {
 	Assert((sig == SIGINT) || (sig == SIGTERM));

-#if PG_VERSION_NUM < PG_VERSION_14
-	if (timeout != 0)
-	{
-		elog(ERROR, "timeout parameter is only supported on Postgres 14 or later");
-	}
-#endif
-
 	bool missingOk = false;
 	int nodeId = ExtractNodeIdFromGlobalPID(globalPID, missingOk);
 	int processId = ExtractProcessIdFromGlobalPID(globalPID);
@@ -102,14 +95,9 @@ CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig)
 	}
 	else
 	{
-#if PG_VERSION_NUM >= PG_VERSION_14
 		appendStringInfo(cancelQuery,
 						 "SELECT pg_terminate_backend(%d::integer, %lu::bigint)",
 						 processId, timeout);
-#else
-		appendStringInfo(cancelQuery, "SELECT pg_terminate_backend(%d::integer)",
-						 processId);
-#endif
 	}

 	int connectionFlags = 0;
@@ -114,13 +114,6 @@ PreprocessClusterStmt(Node *node, const char *clusterCommand,
 static bool
 IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt)
 {
-#if PG_VERSION_NUM < PG_VERSION_14
-	if (clusterStmt->options & CLUOPT_VERBOSE)
-	{
-		return true;
-	}
-	return false;
-#else
 	DefElem *opt = NULL;
 	foreach_ptr(opt, clusterStmt->params)
 	{
@@ -130,5 +123,4 @@ IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt)
 		}
 	}
 	return false;
-#endif
 }
@@ -214,13 +214,7 @@ DeferErrorIfCircularDependencyExists(const ObjectAddress *objectAddress)
 			dependency->objectId == objectAddress->objectId &&
 			dependency->objectSubId == objectAddress->objectSubId)
 		{
-			char *objectDescription = NULL;
-
-#if PG_VERSION_NUM >= PG_VERSION_14
-			objectDescription = getObjectDescription(objectAddress, false);
-#else
-			objectDescription = getObjectDescription(objectAddress);
-#endif
+			char *objectDescription = getObjectDescription(objectAddress, false);

 			StringInfo detailInfo = makeStringInfo();
 			appendStringInfo(detailInfo, "\"%s\" circularly depends itself, resolve "
@@ -529,9 +523,9 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
 	 */
 	Assert(false);
 	ereport(ERROR, (errmsg("unsupported object %s for distribution by citus",
-						   getObjectTypeDescription_compat(dependency,
-														   /* missingOk: */ false)),
+						   getObjectTypeDescription(dependency,
+													/* missingOk: */ false)),
 					errdetail(
 						"citus tries to recreate an unsupported object on its workers"),
 					errhint("please report a bug as this should not be happening")));
@@ -1531,7 +1531,7 @@ GetDistributeObjectOps(Node *node)
 		case T_AlterTableStmt:
 		{
 			AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-			switch (AlterTableStmtObjType_compat(stmt))
+			switch (stmt->objtype)
 			{
 				case OBJECT_TYPE:
 				{
@@ -206,11 +206,7 @@ MakeCollateClauseFromOid(Oid collationOid)
 	List *objName = NIL;
 	List *objArgs = NIL;

-#if PG_VERSION_NUM >= PG_VERSION_14
 	getObjectIdentityParts(&collateAddress, &objName, &objArgs, false);
-#else
-	getObjectIdentityParts(&collateAddress, &objName, &objArgs);
-#endif

 	char *name = NULL;
 	foreach_ptr(name, objName)
@@ -1641,7 +1641,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString,
 	 * workers
 	 */
 	const char *functionName =
-		getObjectIdentity_compat(address, /* missingOk: */ false);
+		getObjectIdentity(address, /* missingOk: */ false);
 	ereport(ERROR, (errmsg("distrtibuted functions are not allowed to depend on an "
 						   "extension"),
 					errdetail("Function \"%s\" is already distributed. Functions from "
@@ -1811,8 +1811,8 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address)
 		List *newProcName = list_make2(namespace, makeString(newName));

 		/* don't need to rename if the input arguments don't match */
-		FuncCandidateList clist = FuncnameGetCandidates_compat(newProcName, numargs, NIL,
-															   false, false, false, true);
+		FuncCandidateList clist = FuncnameGetCandidates(newProcName, numargs, NIL,
+														false, false, false, true);
 		for (; clist; clist = clist->next)
 		{
 			if (memcmp(clist->args, argtypes, sizeof(Oid) * numargs) == 0)
@@ -216,10 +216,10 @@ DoLocalCopy(StringInfo buffer, Oid relationId, int64 shardId, CopyStmt *copyStat
 	ParseState *pState = make_parsestate(NULL);
 	(void) addRangeTableEntryForRelation(pState, shard, AccessShareLock,
 										 NULL, false, false);
-	CopyFromState cstate = BeginCopyFrom_compat(pState, shard, NULL, NULL, false,
-												ReadFromLocalBufferCallback,
-												copyStatement->attlist,
-												copyStatement->options);
+	CopyFromState cstate = BeginCopyFrom(pState, shard, NULL, NULL, false,
+										 ReadFromLocalBufferCallback,
+										 copyStatement->attlist,
+										 copyStatement->options);
 	CopyFrom(cstate);
 	EndCopyFrom(cstate);
@@ -258,9 +258,6 @@ static CopyCoercionData * ColumnCoercionPaths(TupleDesc destTupleDescriptor,
 											  Oid *finalColumnTypeArray);
 static FmgrInfo * TypeOutputFunctions(uint32 columnCount, Oid *typeIdArray,
 									  bool binaryFormat);
-#if PG_VERSION_NUM < PG_VERSION_14
-static List * CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist);
-#endif
 static bool CopyStatementHasFormat(CopyStmt *copyStatement, char *formatName);
 static void CitusCopyFrom(CopyStmt *copyStatement, QueryCompletion *completionTag);
 static void EnsureCopyCanRunOnRelation(Oid relationId);
@@ -609,14 +606,14 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag)
 	}

 	/* initialize copy state to read from COPY data source */
-	CopyFromState copyState = BeginCopyFrom_compat(NULL,
-												   copiedDistributedRelation,
-												   NULL,
-												   copyStatement->filename,
-												   copyStatement->is_program,
-												   NULL,
-												   copyStatement->attlist,
-												   copyStatement->options);
+	CopyFromState copyState = BeginCopyFrom(NULL,
+											copiedDistributedRelation,
+											NULL,
+											copyStatement->filename,
+											copyStatement->is_program,
+											NULL,
+											copyStatement->attlist,
+											copyStatement->options);

 	/* set up callback to identify error line number */
 	errorCallback.callback = CopyFromErrorCallback;
@@ -648,9 +645,7 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag)

 		++processedRowCount;

-#if PG_VERSION_NUM >= PG_VERSION_14
 		pgstat_progress_update_param(PROGRESS_COPY_TUPLES_PROCESSED, processedRowCount);
-#endif
 	}

 	EndCopyFrom(copyState);
@@ -890,28 +885,8 @@ CanUseBinaryCopyFormatForType(Oid typeId)
 	HeapTuple typeTup = typeidType(typeId);
 	Form_pg_type type = (Form_pg_type) GETSTRUCT(typeTup);
 	Oid elementType = type->typelem;
-#if PG_VERSION_NUM < PG_VERSION_14
-	char typeCategory = type->typcategory;
-#endif
 	ReleaseSysCache(typeTup);

-#if PG_VERSION_NUM < PG_VERSION_14
-
-	/*
-	 * In PG versions before PG14 the array_recv function would error out more
-	 * than necessary.
-	 *
-	 * It errors out when the element type its oids don't match with the oid in
-	 * the received data. This happens pretty much always for non built in
-	 * types, because their oids differ between postgres intallations. So we
-	 * skip binary encoding when the element type is a non built in type.
-	 */
-	if (typeCategory == TYPCATEGORY_ARRAY && elementType >= FirstNormalObjectId)
-	{
-		return false;
-	}
-#endif
-
 	/*
 	 * Any type that is a wrapper around an element type (e.g. arrays and
 	 * ranges) require the element type to also has support for binary
@@ -1682,20 +1657,6 @@ AppendCopyBinaryFooters(CopyOutState footerOutputState)
 static void
 SendCopyBegin(CopyOutState cstate)
 {
-#if PG_VERSION_NUM < PG_VERSION_14
-	if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3) {
-		/* old way */
-		if (cstate->binary)
-			ereport(ERROR,
-					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg("COPY BINARY is not supported to stdout or from stdin")));
-		pq_putemptymessage('H');
-		/* grottiness needed for old COPY OUT protocol */
-		pq_startcopyout();
-		cstate->copy_dest = COPY_OLD_FE;
-		return;
-	}
-#endif
 	StringInfoData buf;
 	int natts = list_length(cstate->attnumlist);
 	int16 format = (cstate->binary ? 1 : 0);
@@ -1715,16 +1676,6 @@ SendCopyBegin(CopyOutState cstate)
 static void
 SendCopyEnd(CopyOutState cstate)
 {
-#if PG_VERSION_NUM < PG_VERSION_14
-	if (cstate->copy_dest != COPY_NEW_FE)
-	{
-		CopySendData(cstate, "\\.", 2);
-		/* Need to flush out the trailer (this also appends a newline) */
-		CopySendEndOfRow(cstate, true);
-		pq_endcopyout(false);
-		return;
-	}
-#endif
 	/* Shouldn't have any unsent data */
 	Assert(cstate->fe_msgbuf->len == 0);
 	/* Send Copy Done message */
@@ -1782,21 +1733,6 @@ CopySendEndOfRow(CopyOutState cstate, bool includeEndOfLine)

 	switch (cstate->copy_dest)
 	{
-#if PG_VERSION_NUM < PG_VERSION_14
-		case COPY_OLD_FE:
-			/* The FE/BE protocol uses \n as newline for all platforms */
-			if (!cstate->binary && includeEndOfLine)
-				CopySendChar(cstate, '\n');
-
-			if (pq_putbytes(fe_msgbuf->data, fe_msgbuf->len))
-			{
-				/* no hope of recovering connection sync, so FATAL */
-				ereport(FATAL,
-						(errcode(ERRCODE_CONNECTION_FAILURE),
-						 errmsg("connection lost during COPY to stdout")));
-			}
-			break;
-#endif
 		case COPY_FRONTEND:
 			/* The FE/BE protocol uses \n as newline for all platforms */
 			if (!cstate->binary && includeEndOfLine)
@@ -3256,92 +3192,6 @@ CreateRangeTable(Relation rel, AclMode requiredAccess)
 }


-#if PG_VERSION_NUM < PG_VERSION_14
-
-/* Helper for CheckCopyPermissions(), copied from postgres */
-static List *
-CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist)
-{
-	/* *INDENT-OFF* */
-	List *attnums = NIL;
-
-	if (attnamelist == NIL)
-	{
-		/* Generate default column list */
-		int attr_count = tupDesc->natts;
-		int i;
-
-		for (i = 0; i < attr_count; i++)
-		{
-			if (TupleDescAttr(tupDesc, i)->attisdropped)
-				continue;
-			if (TupleDescAttr(tupDesc, i)->attgenerated)
-				continue;
-			attnums = lappend_int(attnums, i + 1);
-		}
-	}
-	else
-	{
-		/* Validate the user-supplied list and extract attnums */
-		ListCell *l;
-
-		foreach(l, attnamelist)
-		{
-			char *name = strVal(lfirst(l));
-			int attnum;
-			int i;
-
-			/* Lookup column name */
-			attnum = InvalidAttrNumber;
-			for (i = 0; i < tupDesc->natts; i++)
-			{
-				Form_pg_attribute att = TupleDescAttr(tupDesc, i);
-
-				if (att->attisdropped)
-					continue;
-				if (namestrcmp(&(att->attname), name) == 0)
-				{
-					if (att->attgenerated)
-						ereport(ERROR,
-								(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
-								 errmsg("column \"%s\" is a generated column",
-										name),
-								 errdetail("Generated columns cannot be used in COPY.")));
-					attnum = att->attnum;
-					break;
-				}
-			}
-			if (attnum == InvalidAttrNumber)
-			{
-				if (rel != NULL)
-					ereport(ERROR,
-							(errcode(ERRCODE_UNDEFINED_COLUMN),
-							 errmsg("column \"%s\" of relation \"%s\" does not exist",
-									name, RelationGetRelationName(rel))));
-				else
-					ereport(ERROR,
-							(errcode(ERRCODE_UNDEFINED_COLUMN),
-							 errmsg("column \"%s\" does not exist",
-									name)));
-			}
-			/* Check for duplicates */
-			if (list_member_int(attnums, attnum))
-				ereport(ERROR,
-						(errcode(ERRCODE_DUPLICATE_COLUMN),
-						 errmsg("column \"%s\" specified more than once",
-								name)));
-			attnums = lappend_int(attnums, attnum);
-		}
-	}
-
-	return attnums;
-	/* *INDENT-ON* */
-}
-
-
-#endif
-
-
 /*
  * CreateConnectionStateHash constructs a hash table which maps from socket
  * number to CopyConnectionState, passing the provided MemoryContext to
@@ -668,7 +668,7 @@ PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString,
 								 ProcessUtilityContext processUtilityContext)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false,
 																false);
@@ -701,7 +701,7 @@ List *
 AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	RangeVar *sequence = stmt->relation;
 	Oid seqOid = RangeVarGetRelid(sequence, NoLock, missing_ok);
@@ -721,7 +721,7 @@ List *
 PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false,
 																true);
@@ -755,7 +755,7 @@ PreprocessAlterSequencePersistenceStmt(Node *node, const char *queryString,
 									   ProcessUtilityContext processUtilityContext)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false,
 																false);
@@ -788,7 +788,7 @@ List *
 AlterSequencePersistenceStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	RangeVar *sequence = stmt->relation;
 	Oid seqOid = RangeVarGetRelid(sequence, NoLock, missing_ok);
@@ -811,7 +811,7 @@ PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
 								 ProcessUtilityContext processUtilityContext)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	ListCell *cmdCell = NULL;
 	foreach(cmdCell, stmt->cmds)
@@ -1135,7 +1135,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
 	if (relKind == RELKIND_SEQUENCE)
 	{
 		AlterTableStmt *stmtCopy = copyObject(alterTableStatement);
-		AlterTableStmtObjType_compat(stmtCopy) = OBJECT_SEQUENCE;
+		stmtCopy->objtype = OBJECT_SEQUENCE;
 #if (PG_VERSION_NUM >= PG_VERSION_15)

 		/*
@@ -1165,7 +1165,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
 		 * passes through an AlterTableStmt
 		 */
 		AlterTableStmt *stmtCopy = copyObject(alterTableStatement);
-		AlterTableStmtObjType_compat(stmtCopy) = OBJECT_VIEW;
+		stmtCopy->objtype = OBJECT_VIEW;
 		return PreprocessAlterViewStmt((Node *) stmtCopy, alterTableCommand,
 									   processUtilityContext);
 	}
@@ -2521,13 +2521,13 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
 	char relKind = get_rel_relkind(relationId);
 	if (relKind == RELKIND_SEQUENCE)
 	{
-		AlterTableStmtObjType_compat(alterTableStatement) = OBJECT_SEQUENCE;
+		alterTableStatement->objtype = OBJECT_SEQUENCE;
 		PostprocessAlterSequenceOwnerStmt((Node *) alterTableStatement, NULL);
 		return;
 	}
 	else if (relKind == RELKIND_VIEW)
 	{
-		AlterTableStmtObjType_compat(alterTableStatement) = OBJECT_VIEW;
+		alterTableStatement->objtype = OBJECT_VIEW;
 		PostprocessAlterViewStmt((Node *) alterTableStatement, NULL);
 		return;
 	}
@@ -3517,7 +3517,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 				break;
 			}

-#if PG_VERSION_NUM >= PG_VERSION_14
 			case AT_DetachPartitionFinalize:
 			{
 				ereport(ERROR, (errmsg("ALTER TABLE .. DETACH PARTITION .. FINALIZE "
@@ -3525,7 +3524,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 				break;
 			}

-#endif
 			case AT_DetachPartition:
 			{
 				/* we only allow partitioning commands if they are only subcommand */
@@ -3537,7 +3535,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 									  errhint("You can issue each subcommand "
 											  "separately.")));
 				}
-#if PG_VERSION_NUM >= PG_VERSION_14
+
 				PartitionCmd *partitionCommand = (PartitionCmd *) command->def;

 				if (partitionCommand->concurrent)
@@ -3546,7 +3544,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 										   "CONCURRENTLY commands are currently "
 										   "unsupported.")));
 				}
-#endif

 				break;
 			}
@@ -3589,20 +3586,18 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 		case AT_NoForceRowSecurity:
 		case AT_ValidateConstraint:
 		case AT_DropConstraint: /* we do the check for invalidation in AlterTableDropsForeignKey */
-#if PG_VERSION_NUM >= PG_VERSION_14
 		case AT_SetCompression:
-#endif
 		{
 			/*
 			 * We will not perform any special check for:
 			 * ALTER TABLE .. SET ACCESS METHOD ..
 			 * ALTER TABLE .. ALTER COLUMN .. SET NOT NULL
 			 * ALTER TABLE .. REPLICA IDENTITY ..
 			 * ALTER TABLE .. VALIDATE CONSTRAINT ..
 			 * ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION ..
 			 */
 			break;
 		}

 		case AT_SetRelOptions:  /* SET (...) */
 		case AT_ResetRelOptions:    /* RESET (...) */
@@ -350,7 +350,7 @@ List *
 AlterTypeStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE);
+	Assert(stmt->objtype == OBJECT_TYPE);

 	TypeName *typeName = MakeTypeNameFromRangeVar(stmt->relation);
 	Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
@@ -549,7 +549,7 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress)
 	const char *username = GetUserNameFromId(GetTypeOwner(typeAddress->objectId), false);
 	initStringInfo(&buf);
 	appendStringInfo(&buf, ALTER_TYPE_OWNER_COMMAND,
-					 getObjectIdentity_compat(typeAddress, false),
+					 getObjectIdentity(typeAddress, false),
 					 quote_identifier(username));
 	ddlCommands = lappend(ddlCommands, buf.data);
@@ -33,9 +33,6 @@
 #include "access/attnum.h"
 #include "access/heapam.h"
 #include "access/htup_details.h"
-#if PG_VERSION_NUM < 140000
-#include "access/xact.h"
-#endif
 #include "catalog/catalog.h"
 #include "catalog/dependency.h"
 #include "citus_version.h"
@@ -60,9 +57,6 @@
 #include "distributed/maintenanced.h"
 #include "distributed/multi_logical_replication.h"
 #include "distributed/multi_partitioning_utils.h"
-#if PG_VERSION_NUM < 140000
-#include "distributed/metadata_cache.h"
-#endif
 #include "distributed/metadata_sync.h"
 #include "distributed/metadata/distobject.h"
 #include "distributed/multi_executor.h"
@@ -107,9 +101,7 @@ static void ProcessUtilityInternal(PlannedStmt *pstmt,
 								   struct QueryEnvironment *queryEnv,
 								   DestReceiver *dest,
 								   QueryCompletion *completionTag);
-#if PG_VERSION_NUM >= 140000
 static void set_indexsafe_procflags(void);
-#endif
 static char * CurrentSearchPath(void);
 static void IncrementUtilityHookCountersIfNecessary(Node *parsetree);
 static void PostStandardProcessUtility(Node *parsetree);
@@ -131,8 +123,8 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte
 	plannedStmt->commandType = CMD_UTILITY;
 	plannedStmt->utilityStmt = node;

-	ProcessUtility_compat(plannedStmt, queryString, false, context, params, NULL, dest,
-						  completionTag);
+	ProcessUtility(plannedStmt, queryString, false, context, params, NULL, dest,
+				   completionTag);
 }
@@ -148,25 +140,19 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte
 void
 multi_ProcessUtility(PlannedStmt *pstmt,
 					 const char *queryString,
-#if PG_VERSION_NUM >= PG_VERSION_14
 					 bool readOnlyTree,
-#endif
 					 ProcessUtilityContext context,
 					 ParamListInfo params,
 					 struct QueryEnvironment *queryEnv,
 					 DestReceiver *dest,
 					 QueryCompletion *completionTag)
 {
-	Node *parsetree;
-
-#if PG_VERSION_NUM >= PG_VERSION_14
 	if (readOnlyTree)
 	{
 		pstmt = copyObject(pstmt);
 	}
-#endif

-	parsetree = pstmt->utilityStmt;
+	Node *parsetree = pstmt->utilityStmt;

 	if (IsA(parsetree, TransactionStmt))
 	{
@@ -199,8 +185,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 	 * that state. Since we never need to intercept transaction statements,
 	 * skip our checks and immediately fall into standard_ProcessUtility.
 	 */
-	PrevProcessUtility_compat(pstmt, queryString, false, context,
-							  params, queryEnv, dest, completionTag);
+	PrevProcessUtility(pstmt, queryString, false, context,
+					   params, queryEnv, dest, completionTag);

 	return;
 }
@@ -244,8 +230,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 	 * Ensure that utility commands do not behave any differently until CREATE
 	 * EXTENSION is invoked.
 	 */
-	PrevProcessUtility_compat(pstmt, queryString, false, context,
-							  params, queryEnv, dest, completionTag);
+	PrevProcessUtility(pstmt, queryString, false, context,
+					   params, queryEnv, dest, completionTag);

 	return;
 }
@@ -276,8 +262,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,

 	PG_TRY();
 	{
-		PrevProcessUtility_compat(pstmt, queryString, false, context,
-								  params, queryEnv, dest, completionTag);
+		PrevProcessUtility(pstmt, queryString, false, context,
+						   params, queryEnv, dest, completionTag);

 		StoredProcedureLevel -= 1;
@@ -310,8 +296,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,

 	PG_TRY();
 	{
-		PrevProcessUtility_compat(pstmt, queryString, false, context,
-								  params, queryEnv, dest, completionTag);
+		PrevProcessUtility(pstmt, queryString, false, context,
+						   params, queryEnv, dest, completionTag);

 		DoBlockLevel -= 1;
 	}
@@ -649,8 +635,8 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 	if (IsA(parsetree, AlterTableStmt))
 	{
 		AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree;
-		if (AlterTableStmtObjType_compat(alterTableStmt) == OBJECT_TABLE ||
-			AlterTableStmtObjType_compat(alterTableStmt) == OBJECT_FOREIGN_TABLE)
+		if (alterTableStmt->objtype == OBJECT_TABLE ||
+			alterTableStmt->objtype == OBJECT_FOREIGN_TABLE)
 		{
 			ErrorIfAlterDropsPartitionColumn(alterTableStmt);
@@ -769,8 +755,8 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 		PreprocessAlterExtensionCitusStmtForCitusColumnar(parsetree);
 	}

-	PrevProcessUtility_compat(pstmt, queryString, false, context,
-							  params, queryEnv, dest, completionTag);
+	PrevProcessUtility(pstmt, queryString, false, context,
+					   params, queryEnv, dest, completionTag);

 	if (isAlterExtensionUpdateCitusStmt)
 	{
@@ -1208,38 +1194,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 	 */
 	if (ddlJob->startNewTransaction)
 	{
-#if PG_VERSION_NUM < 140000
-
-		/*
-		 * Older versions of postgres doesn't have PROC_IN_SAFE_IC flag
-		 * so we cannot use set_indexsafe_procflags in those versions.
-		 *
-		 * For this reason, we do our best to ensure not grabbing any
-		 * snapshots later in the executor.
-		 */
-
-		/*
-		 * If cache is not populated, system catalog lookups will cause
-		 * the xmin of current backend to change. Then the last phase
-		 * of CREATE INDEX CONCURRENTLY, which is in a separate backend,
-		 * will hang waiting for our backend and result in a deadlock.
-		 *
-		 * We populate the cache before starting the next transaction to
-		 * avoid this. Most of the metadata has already been resolved in
-		 * planning phase, we only need to lookup metadata needed for
-		 * connection establishment.
-		 */
-		(void) CurrentDatabaseName();
-
-		/*
-		 * ConnParams (AuthInfo and PoolInfo) gets a snapshot, which
-		 * will blocks the remote connections to localhost. Hence we warm up
-		 * the cache here so that after we start a new transaction, the entries
-		 * will already be in the hash table, hence we won't be holding any snapshots.
-		 */
-		WarmUpConnParamsHash();
-#endif
-
 		/*
 		 * Since it is not certain whether the code-path that we followed
 		 * until reaching here caused grabbing any snapshots or not, we
@@ -1258,8 +1212,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 		CommitTransactionCommand();
 		StartTransactionCommand();

-#if PG_VERSION_NUM >= 140000
-
 		/*
 		 * Tell other backends to ignore us, even if we grab any
 		 * snapshots via adaptive executor.
@@ -1274,7 +1226,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 		 * given above.
 		 */
 		Assert(localExecutionSupported == false);
-#endif
 	}

 	MemoryContext savedContext = CurrentMemoryContext;
@@ -1340,8 +1291,6 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
 }


-#if PG_VERSION_NUM >= 140000
-
 /*
  * set_indexsafe_procflags sets PROC_IN_SAFE_IC flag in MyProc->statusFlags.
  *
@@ -1364,9 +1313,6 @@ set_indexsafe_procflags(void)
 }


-#endif
-
-
 /*
  * CurrentSearchPath is a C interface for calling current_schemas(bool) that
  * PostgreSQL exports.
@@ -359,12 +359,12 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
 	{
 		appendStringInfoString(vacuumPrefix, "SKIP_LOCKED,");
 	}
-#if PG_VERSION_NUM >= PG_VERSION_14

 	if (vacuumFlags & VACOPT_PROCESS_TOAST)
 	{
 		appendStringInfoString(vacuumPrefix, "PROCESS_TOAST,");
 	}
-#endif

 	if (vacuumParams.truncate != VACOPTVALUE_UNSPECIFIED)
 	{
 		appendStringInfoString(vacuumPrefix,
@@ -389,13 +389,11 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
 			break;
 		}

-#if PG_VERSION_NUM >= PG_VERSION_14
 		case VACOPTVALUE_AUTO:
 		{
 			appendStringInfoString(vacuumPrefix, "INDEX_CLEANUP auto,");
 			break;
 		}
-#endif

 		default:
 		{
@@ -501,9 +499,7 @@ VacuumStmtParams(VacuumStmt *vacstmt)
 	bool freeze = false;
 	bool full = false;
 	bool disable_page_skipping = false;
-#if PG_VERSION_NUM >= PG_VERSION_14
 	bool process_toast = false;
-#endif

 	/* Set default value */
 	params.index_cleanup = VACOPTVALUE_UNSPECIFIED;
@@ -547,16 +543,12 @@ VacuumStmtParams(VacuumStmt *vacstmt)
 		{
 			disable_page_skipping = defGetBoolean(opt);
 		}
-#if PG_VERSION_NUM >= PG_VERSION_14
 		else if (strcmp(opt->defname, "process_toast") == 0)
 		{
 			process_toast = defGetBoolean(opt);
 		}
-#endif
 		else if (strcmp(opt->defname, "index_cleanup") == 0)
 		{
-#if PG_VERSION_NUM >= PG_VERSION_14
-
 			/* Interpret no string as the default, which is 'auto' */
 			if (!opt->arg)
 			{
@@ -577,10 +569,6 @@ VacuumStmtParams(VacuumStmt *vacstmt)
 									   VACOPTVALUE_DISABLED;
 			}
 		}
-#else
-			params.index_cleanup = defGetBoolean(opt) ? VACOPTVALUE_ENABLED :
-								   VACOPTVALUE_DISABLED;
-#endif
 		}
 		else if (strcmp(opt->defname, "truncate") == 0)
 		{
@@ -625,9 +613,7 @@ VacuumStmtParams(VacuumStmt *vacstmt)
 				   (analyze ? VACOPT_ANALYZE : 0) |
 				   (freeze ? VACOPT_FREEZE : 0) |
 				   (full ? VACOPT_FULL : 0) |
-#if PG_VERSION_NUM >= PG_VERSION_14
 				   (process_toast ? VACOPT_PROCESS_TOAST : 0) |
-#endif
 				   (disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0);
 	return params;
 }
@@ -598,7 +598,7 @@ List *
 PostprocessAlterViewStmt(Node *node, const char *queryString)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_VIEW);
+	Assert(stmt->objtype == OBJECT_VIEW);

 	List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true, true);
@@ -1314,33 +1314,6 @@ StartConnectionEstablishment(MultiConnection *connection, ConnectionHashKey *key
 }


-#if PG_VERSION_NUM < 140000
-
-/*
- * WarmUpConnParamsHash warms up the ConnParamsHash by loading all the
- * conn params for active primary nodes.
- */
-void
-WarmUpConnParamsHash(void)
-{
-	List *workerNodeList = ActivePrimaryNodeList(AccessShareLock);
-	WorkerNode *workerNode = NULL;
-	foreach_ptr(workerNode, workerNodeList)
-	{
-		ConnectionHashKey key;
-		strlcpy(key.hostname, workerNode->workerName, MAX_NODE_LENGTH);
-		key.port = workerNode->workerPort;
-		strlcpy(key.database, CurrentDatabaseName(), NAMEDATALEN);
-		strlcpy(key.user, CurrentUserName(), NAMEDATALEN);
-		key.replicationConnParam = false;
-		FindOrCreateConnParamsEntry(&key);
-	}
-}
-
-
-#endif
-
-
 /*
  * FindOrCreateConnParamsEntry searches ConnParamsHash for the given key,
  * if it is not found, it is created.
@@ -22,9 +22,7 @@
 #include "access/skey.h"
 #include "access/stratnum.h"
 #include "access/sysattr.h"
-#if PG_VERSION_NUM >= PG_VERSION_14
 #include "access/toast_compression.h"
-#endif
 #include "access/tupdesc.h"
 #include "catalog/dependency.h"
 #include "catalog/indexing.h"
@@ -386,13 +384,11 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
 										  atttypmod);
 		appendStringInfoString(&buffer, attributeTypeName);

-#if PG_VERSION_NUM >= PG_VERSION_14
 		if (CompressionMethodIsValid(attributeForm->attcompression))
 		{
 			appendStringInfo(&buffer, " COMPRESSION %s",
 							 GetCompressionMethodName(attributeForm->attcompression));
 		}
-#endif

 		if (attributeForm->attidentity && includeIdentityDefaults)
 		{
@@ -939,17 +935,6 @@ deparse_shard_reindex_statement(ReindexStmt *origStmt, Oid distrelid, int64 shar
 bool
 IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param)
 {
-#if PG_VERSION_NUM < PG_VERSION_14
-	if (strcmp(param, "concurrently") == 0)
-	{
-		return reindexStmt->concurrent;
-	}
-	else if (strcmp(param, "verbose") == 0)
-	{
-		return reindexStmt->options & REINDEXOPT_VERBOSE;
-	}
-	return false;
-#else
 	DefElem *opt = NULL;
 	foreach_ptr(opt, reindexStmt->params)
 	{
@@ -959,7 +944,6 @@ IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param)
 		}
 	}
 	return false;
-#endif
 }
@@ -974,7 +958,7 @@ AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer)
 	{
 		appendStringInfoString(temp, "VERBOSE");
 	}
-#if PG_VERSION_NUM >= PG_VERSION_14
+
 	char *tableSpaceName = NULL;
 	DefElem *opt = NULL;
 	foreach_ptr(opt, reindexStmt->params)
@@ -997,7 +981,6 @@ AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer)
 			appendStringInfo(temp, "TABLESPACE %s", tableSpaceName);
 		}
 	}
-#endif

 	if (temp->len > 0)
 	{
@@ -1627,9 +1610,7 @@ RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier)
 					   spec->rolename;
 		}

-#if PG_VERSION_NUM >= PG_VERSION_14
 		case ROLESPEC_CURRENT_ROLE:
-#endif
 		case ROLESPEC_CURRENT_USER:
 		{
 			return withQuoteIdentifier ?
@@ -193,7 +193,7 @@ DeparseAlterSequenceOwnerStmt(Node *node)
 	StringInfoData str = { 0 };
 	initStringInfo(&str);

-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	AppendAlterSequenceOwnerStmt(&str, stmt);
@@ -208,7 +208,7 @@ DeparseAlterSequenceOwnerStmt(Node *node)
 static void
 AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt)
 {
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);
 	RangeVar *seq = stmt->relation;
 	char *qualifiedSequenceName = quote_qualified_identifier(seq->schemaname,
 															 seq->relname);
@@ -274,7 +274,7 @@ DeparseAlterSequencePersistenceStmt(Node *node)
 	StringInfoData str = { 0 };
 	initStringInfo(&str);

-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	AppendAlterSequencePersistenceStmt(&str, stmt);
@@ -289,7 +289,7 @@ DeparseAlterSequencePersistenceStmt(Node *node)
 static void
 AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt)
 {
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	RangeVar *seq = stmt->relation;
 	char *qualifiedSequenceName = quote_qualified_identifier(seq->schemaname,
@@ -229,7 +229,6 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt)
 }


-#if PG_VERSION_NUM >= PG_VERSION_14
 static void
 AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
 {
@@ -257,36 +256,6 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
 }


-#else
-static void
-AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
-{
-	ColumnRef *column = NULL;
-
-	foreach_ptr(column, stmt->exprs)
-	{
-		if (!IsA(column, ColumnRef) || list_length(column->fields) != 1)
-		{
-			ereport(ERROR,
-					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg(
-						 "only simple column references are allowed in CREATE STATISTICS")));
-		}
-
-		char *columnName = NameListToQuotedString(column->fields);
-
-		appendStringInfoString(buf, columnName);
-
-		if (column != llast(stmt->exprs))
-		{
-			appendStringInfoString(buf, ", ");
-		}
-	}
-}
-
-
-#endif
-
 static void
 AppendTableName(StringInfo buf, CreateStatsStmt *stmt)
 {
@@ -77,7 +77,7 @@ DeparseAlterTableStmt(Node *node)
 	StringInfoData str = { 0 };
 	initStringInfo(&str);

-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TABLE);
+	Assert(stmt->objtype == OBJECT_TABLE);

 	AppendAlterTableStmt(&str, stmt);
 	return str.data;
@@ -96,7 +96,7 @@ AppendAlterTableStmt(StringInfo buf, AlterTableStmt *stmt)
 														  stmt->relation->relname);
 	ListCell *cmdCell = NULL;

-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TABLE);
+	Assert(stmt->objtype == OBJECT_TABLE);

 	appendStringInfo(buf, "ALTER TABLE %s", identifier);
 	foreach(cmdCell, stmt->cmds)
@@ -122,7 +122,7 @@ DeparseAlterTypeStmt(Node *node)
 	StringInfoData str = { 0 };
 	initStringInfo(&str);

-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE);
+	Assert(stmt->objtype == OBJECT_TYPE);

 	AppendAlterTypeStmt(&str, stmt);
@@ -137,7 +137,7 @@ AppendAlterTypeStmt(StringInfo buf, AlterTableStmt *stmt)
 														  stmt->relation->relname);
 	ListCell *cmdCell = NULL;

-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE);
+	Assert(stmt->objtype == OBJECT_TYPE);

 	appendStringInfo(buf, "ALTER TYPE %s", identifier);
 	foreach(cmdCell, stmt->cmds)
@@ -245,11 +245,7 @@ QualifyCollate(CollateClause *collClause, bool missing_ok)
 	List *objName = NIL;
 	List *objArgs = NIL;

-#if PG_VERSION_NUM >= PG_VERSION_14
 	getObjectIdentityParts(&collationAddress, &objName, &objArgs, false);
-#else
-	getObjectIdentityParts(&collationAddress, &objName, &objArgs);
-#endif

 	collClause->collname = NIL;
 	char *name = NULL;
@@ -34,7 +34,7 @@ void
 QualifyAlterSequenceOwnerStmt(Node *node)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	RangeVar *seq = stmt->relation;
@@ -62,7 +62,7 @@ void
 QualifyAlterSequencePersistenceStmt(Node *node)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE);
+	Assert(stmt->objtype == OBJECT_SEQUENCE);

 	RangeVar *seq = stmt->relation;
@@ -123,7 +123,7 @@ void
 QualifyAlterTypeStmt(Node *node)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
-	Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE);
+	Assert(stmt->objtype == OBJECT_TYPE);

 	if (stmt->relation->schemaname == NULL)
 	{
[File diff suppressed because it is too large]
@@ -496,11 +496,7 @@ struct TaskPlacementExecution;
 /* GUC, determining whether Citus opens 1 connection per task */
 bool ForceMaxQueryParallelization = false;
 int MaxAdaptiveExecutorPoolSize = 16;
-#if PG_VERSION_NUM >= PG_VERSION_14
 bool EnableBinaryProtocol = true;
-#else
-bool EnableBinaryProtocol = false;
-#endif

 /* GUC, number of ms to wait between opening connections to the same worker */
 int ExecutorSlowStartInterval = 10;
@@ -455,9 +455,9 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript
 									   location);
 	copyOptions = lappend(copyOptions, copyOption);

-	CopyFromState copyState = BeginCopyFrom_compat(NULL, stubRelation, NULL,
-												   fileName, false, NULL,
-												   NULL, copyOptions);
+	CopyFromState copyState = BeginCopyFrom(NULL, stubRelation, NULL,
+											fileName, false, NULL,
+											NULL, copyOptions);

 	while (true)
 	{
@@ -797,11 +797,7 @@ BuildExistingQueryIdHash(void)
 {
 	const int userIdAttributeNumber = 1;
 	const int dbIdAttributeNumber = 2;
-#if PG_VERSION_NUM >= PG_VERSION_14
 	const int queryIdAttributeNumber = 4;
-#else
-	const int queryIdAttributeNumber = 3;
-#endif
 	Datum commandTypeDatum = (Datum) 0;
 	bool missingOK = true;
@@ -896,18 +896,11 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress)
 		return NULL;
 	}

-	char *objectDescription = NULL;
-	char *dependencyDescription = NULL;
 	StringInfo errorInfo = makeStringInfo();
 	StringInfo detailInfo = makeStringInfo();

-#if PG_VERSION_NUM >= PG_VERSION_14
-	objectDescription = getObjectDescription(objectAddress, false);
-	dependencyDescription = getObjectDescription(undistributableDependency, false);
-#else
-	objectDescription = getObjectDescription(objectAddress);
-	dependencyDescription = getObjectDescription(undistributableDependency);
-#endif
+	char *objectDescription = getObjectDescription(objectAddress, false);
+	char *dependencyDescription = getObjectDescription(undistributableDependency, false);

 	/*
 	 * We expect callers to interpret the error returned from this function
@@ -85,12 +85,12 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS)
 	{
 		ereport(ERROR, (errmsg("object still exists"),
 						errdetail("the %s \"%s\" still exists",
-								  getObjectTypeDescription_compat(&address,
-																  /* missingOk: */ false),
-								  getObjectIdentity_compat(&address,
-														   /* missingOk: */ false)),
+								  getObjectTypeDescription(&address,
+														   /* missingOk: */ false),
+								  getObjectIdentity(&address,
+													/* missingOk: */ false)),
 						errhint("drop the object via a DROP command")));
 	}
@@ -916,15 +916,9 @@ MarkObjectsDistributedCreateCommand(List *addresses,
 		int forceDelegation = list_nth_int(forceDelegations, currentObjectCounter);
 		List *names = NIL;
 		List *args = NIL;
-		char *objectType = NULL;
-
-#if PG_VERSION_NUM >= PG_VERSION_14
-		objectType = getObjectTypeDescription(address, false);
+		char *objectType = getObjectTypeDescription(address, false);
 		getObjectIdentityParts(address, &names, &args, false);
-#else
-		objectType = getObjectTypeDescription(address);
-		getObjectIdentityParts(address, &names, &args);
-#endif

 		if (!isFirstObject)
 		{
@@ -4031,11 +4031,7 @@ CancelTasksForJob(int64 jobid)
 						errmsg("must be a superuser to cancel superuser tasks")));
 	}
 	else if (!has_privs_of_role(GetUserId(), taskOwner) &&
-#if PG_VERSION_NUM >= 140000
 			 !has_privs_of_role(GetUserId(), ROLE_PG_SIGNAL_BACKEND))
-#else
-			 !has_privs_of_role(GetUserId(), DEFAULT_ROLE_SIGNAL_BACKENDID))
-#endif
 	{
 		/* user doesn't have the permissions to cancel this job */
 		ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -31,11 +31,7 @@
 #include "utils/guc.h"
 #include "utils/hsearch.h"
 #include "utils/memutils.h"
-#if PG_VERSION_NUM < PG_VERSION_13
-#include "utils/hashutils.h"
-#else
 #include "common/hashfn.h"
-#endif


 /* Config variables managed via guc.c */
@@ -527,13 +527,13 @@ LocalCopyToShard(ShardCopyDestReceiver *copyDest, CopyOutState localCopyOutState
 									  false /* inFromCl */);

 	List *options = (isBinaryCopy) ? list_make1(binaryFormatOption) : NULL;
-	CopyFromState cstate = BeginCopyFrom_compat(pState, shard,
-												NULL /* whereClause */,
-												NULL /* fileName */,
-												false /* is_program */,
-												ReadFromLocalBufferCallback,
-												NULL /* attlist (NULL is all columns) */,
-												options);
+	CopyFromState cstate = BeginCopyFrom(pState, shard,
+										 NULL /* whereClause */,
+										 NULL /* fileName */,
+										 false /* is_program */,
+										 ReadFromLocalBufferCallback,
+										 NULL /* attlist (NULL is all columns) */,
+										 options);
 	CopyFrom(cstate);
 	EndCopyFrom(cstate);
 	resetStringInfo(localCopyOutState->fe_msgbuf);
@@ -861,8 +861,8 @@ RouterModifyTaskForShardInterval(Query *originalQuery,
 	 * Note that this is only the case with PG14 as the parameter doesn't exist
 	 * prior to that.
 	 */
-	shardRestrictionList = make_simple_restrictinfo_compat(NULL,
-														   (Expr *) shardOpExpressions);
+	shardRestrictionList = make_simple_restrictinfo(NULL,
+													(Expr *) shardOpExpressions);
 	extendedBaseRestrictInfo = lappend(extendedBaseRestrictInfo,
 									   shardRestrictionList);
@@ -1101,8 +1101,8 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
 	TupleDesc tupleDescriptor = NULL;
 	Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor);
 	DestReceiver *tupleStoreDest = CreateTuplestoreDestReceiver();
-	SetTuplestoreDestReceiverParams_compat(tupleStoreDest, tupleStore,
-										   CurrentMemoryContext, false, NULL, NULL);
+	SetTuplestoreDestReceiverParams(tupleStoreDest, tupleStore,
+									CurrentMemoryContext, false, NULL, NULL);

 	List *parseTreeList = pg_parse_query(queryString);
 	if (list_length(parseTreeList) != 1)
@@ -1126,15 +1126,9 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
 	Query *analyzedQuery = parse_analyze_varparams_compat(parseTree, queryString,
 														  &paramTypes, &numParams, NULL);

-#if PG_VERSION_NUM >= PG_VERSION_14
-
 	/* pg_rewrite_query is a wrapper around QueryRewrite with some debugging logic */
 	List *queryList = pg_rewrite_query(analyzedQuery);
-#else
-
-	/* pg_rewrite_query is not yet public in PostgreSQL 13 */
-	List *queryList = QueryRewrite(analyzedQuery);
-#endif
 	if (list_length(queryList) != 1)
 	{
 		ereport(ERROR, (errmsg("cannot EXPLAIN ANALYZE a query rewritten "
@@ -1855,11 +1855,7 @@ MasterAggregateExpression(Aggref *originalAggregate,
 	{
 		/* array_cat_agg() takes anyarray as input */
 		catAggregateName = ARRAY_CAT_AGGREGATE_NAME;
-#if PG_VERSION_NUM >= PG_VERSION_14
 		catInputType = ANYCOMPATIBLEARRAYOID;
-#else
-		catInputType = ANYARRAYOID;
-#endif
 	}
 	else if (aggregateType == AGGREGATE_JSONB_AGG ||
 			 aggregateType == AGGREGATE_JSONB_OBJECT_AGG)
@@ -1897,8 +1893,6 @@ MasterAggregateExpression(Aggref *originalAggregate,

 	if (aggregateType == AGGREGATE_ARRAY_AGG)
 	{
-#if PG_VERSION_NUM >= PG_VERSION_14
-
 		/*
 		 * Postgres expects the type of the array here such as INT4ARRAYOID.
 		 * Hence we set it to workerReturnType. If we set this to
@@ -1906,9 +1900,6 @@ MasterAggregateExpression(Aggref *originalAggregate,
 		 * "argument declared anycompatiblearray is not an array but type anycompatiblearray"
 		 */
 		newMasterAggregate->aggargtypes = list_make1_oid(workerReturnType);
-#else
-		newMasterAggregate->aggargtypes = list_make1_oid(ANYARRAYOID);
-#endif
 	}
 	else
 	{
@@ -3625,8 +3616,8 @@ static Oid
 CitusFunctionOidWithSignature(char *functionName, int numargs, Oid *argtypes)
 {
 	List *aggregateName = list_make2(makeString("pg_catalog"), makeString(functionName));
-	FuncCandidateList clist = FuncnameGetCandidates_compat(aggregateName, numargs, NIL,
-														   false, false, false, true);
+	FuncCandidateList clist = FuncnameGetCandidates(aggregateName, numargs, NIL,
+													false, false, false, true);

 	for (; clist; clist = clist->next)
 	{
@ -152,10 +152,8 @@ static List * ExtractInsertValuesList(Query *query, Var *partitionColumn);
static DeferredErrorMessage * DeferErrorIfUnsupportedRouterPlannableSelectQuery(
    Query *query);
static DeferredErrorMessage * ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree);
#if PG_VERSION_NUM >= PG_VERSION_14
static DeferredErrorMessage * ErrorIfQueryHasCTEWithSearchClause(Query *queryTree);
static bool ContainsSearchClauseWalker(Node *node, void *context);
#endif
static bool SelectsFromDistributedTable(List *rangeTableList, Query *query);
static ShardPlacement * CreateDummyPlacement(bool hasLocalRelation);
static ShardPlacement * CreateLocalDummyPlacement();

@ -1118,14 +1116,12 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
        }
    }

#if PG_VERSION_NUM >= PG_VERSION_14
    DeferredErrorMessage *CTEWithSearchClauseError =
        ErrorIfQueryHasCTEWithSearchClause(originalQuery);
    if (CTEWithSearchClauseError != NULL)
    {
        return CTEWithSearchClauseError;
    }
#endif

    return NULL;
}

@ -3758,14 +3754,12 @@ DeferErrorIfUnsupportedRouterPlannableSelectQuery(Query *query)
                             NULL, NULL);
    }

#if PG_VERSION_NUM >= PG_VERSION_14
    DeferredErrorMessage *CTEWithSearchClauseError =
        ErrorIfQueryHasCTEWithSearchClause(query);
    if (CTEWithSearchClauseError != NULL)
    {
        return CTEWithSearchClauseError;
    }
#endif

    return ErrorIfQueryHasUnroutableModifyingCTE(query);
}

@ -3900,8 +3894,6 @@ ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree)
}


#if PG_VERSION_NUM >= PG_VERSION_14

/*
 * ErrorIfQueryHasCTEWithSearchClause checks if the query contains any common table
 * expressions with search clause and errors out if it does.

@ -3948,9 +3940,6 @@ ContainsSearchClauseWalker(Node *node, void *context)
}


#endif


/*
 * get_all_actual_clauses
 *
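These guards existed because SEARCH/CYCLE clauses on CTEs only arrived in PG14; once PG13 is gone, the check runs unconditionally. A minimal sketch of what such a walker can look like, assuming PG14+'s CommonTableExpr.search_clause field; the body is an assumption, not the verbatim Citus implementation:

    /* sketch: flag any CTE that carries a SEARCH clause */
    static bool
    ContainsSearchClauseWalkerSketch(Node *node, void *context)
    {
        if (node == NULL)
        {
            return false;
        }

        if (IsA(node, CommonTableExpr) &&
            ((CommonTableExpr *) node)->search_clause != NULL)
        {
            return true;
        }

        return expression_tree_walker(node, ContainsSearchClauseWalkerSketch, context);
    }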
@ -2143,8 +2143,8 @@ GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry,
         * If the restriction involves multiple tables, we cannot add it to
         * input relation's expression list.
         */
        Relids varnos = pull_varnos_compat(relationRestriction->plannerInfo,
                                           (Node *) restrictionClause);
        Relids varnos = pull_varnos(relationRestriction->plannerInfo,
                                    (Node *) restrictionClause);
        if (bms_num_members(varnos) != 1)
        {
            continue;
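PG14 changed pull_varnos to take the planner state as its first argument (PG13's version took only the node), which is exactly what pull_varnos_compat papered over, per the macro pair removed below. A minimal sketch with hypothetical root and clause variables standing in for the real call site:

    /* sketch: PG14+ signature takes the PlannerInfo first */
    Relids varnos = pull_varnos(root, (Node *) clause);
    if (bms_num_members(varnos) != 1)
    {
        /* the clause references more than one relation */
    }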
@ -1536,7 +1536,7 @@ CreateSubscriptions(MultiConnection *sourceConnection,
                     quote_identifier(target->publication->name),
                     quote_identifier(target->replicationSlot->name));

    if (EnableBinaryProtocol && PG_VERSION_NUM >= PG_VERSION_14)
    if (EnableBinaryProtocol)
    {
        appendStringInfoString(createSubscriptionCommand, ", binary=true)");
    }
@ -1215,11 +1215,7 @@ RegisterCitusConfigVariables(void)
            "Enables communication between nodes using binary protocol when possible"),
        NULL,
        &EnableBinaryProtocol,
#if PG_VERSION_NUM >= PG_VERSION_14
        true,
#else
        false,
#endif
        PGC_USERSET,
        GUC_STANDARD,
        NULL, NULL, NULL);
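With PG13 gone, the boot value for this GUC no longer needs a version branch. A hedged sketch of the registration call as it reads after the change; the GUC name citus.enable_binary_protocol is taken from the test files later in this diff, the rest is the standard DefineCustomBoolVariable shape:

    /* sketch: default is unconditionally true on PG14+ */
    DefineCustomBoolVariable(
        "citus.enable_binary_protocol",
        gettext_noop("Enables communication between nodes using binary "
                     "protocol when possible"),
        NULL,
        &EnableBinaryProtocol,
        true,
        PGC_USERSET,
        GUC_STANDARD,
        NULL, NULL, NULL);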
@ -169,7 +169,6 @@ fake_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
}


#if PG_VERSION_NUM >= PG_VERSION_14
static TransactionId
fake_index_delete_tuples(Relation rel,
                         TM_IndexDeleteOp *delstate)

@ -179,20 +178,6 @@ fake_index_delete_tuples(Relation rel,
}


#else
static TransactionId
fake_compute_xid_horizon_for_tuples(Relation rel,
                                    ItemPointerData *tids,
                                    int nitems)
{
    elog(ERROR, "fake_compute_xid_horizon_for_tuples not implemented");
    return InvalidTransactionId;
}


#endif


/* ----------------------------------------------------------------------------
 * Functions for manipulations of physical tuples for fake AM.
 * ----------------------------------------------------------------------------

@ -568,11 +553,7 @@ static const TableAmRoutine fake_methods = {
    .tuple_get_latest_tid = fake_get_latest_tid,
    .tuple_tid_valid = fake_tuple_tid_valid,
    .tuple_satisfies_snapshot = fake_tuple_satisfies_snapshot,
#if PG_VERSION_NUM >= PG_VERSION_14
    .index_delete_tuples = fake_index_delete_tuples,
#else
    .compute_xid_horizon_for_tuples = fake_compute_xid_horizon_for_tuples,
#endif

    .relation_set_new_filenode = fake_relation_set_new_filenode,
    .relation_nontransactional_truncate = fake_relation_nontransactional_truncate,
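PG14 replaced the compute_xid_horizon_for_tuples table AM callback with index_delete_tuples, so only the PG14+ stub survives in the fake AM. A minimal sketch of that surviving stub; the body is inferred from the pattern of the other not-implemented fakes in this file (its actual body is elided by the hunk), so treat it as an assumption:

    /* sketch: PG14+ callback stub, mirroring the other fake AM stubs */
    static TransactionId
    fake_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
    {
        elog(ERROR, "fake_index_delete_tuples not implemented");
        return InvalidTransactionId;
    }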
@ -48,8 +48,8 @@ MemoryContextTotalSpace(MemoryContext context)
    Size totalSpace = 0;

    MemoryContextCounters totals = { 0 };
    TopTransactionContext->methods->stats_compat(TopTransactionContext, NULL, NULL,
                                                 &totals, true);
    TopTransactionContext->methods->stats(TopTransactionContext, NULL, NULL,
                                          &totals, true);
    totalSpace += totals.totalspace;

    for (MemoryContext child = context->firstchild;
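PG14 added a print_to_stderr argument to the MemoryContextMethods stats callback; that extra boolean is all stats_compat hid. A minimal sketch of summing a whole context tree with the PG14+ five-argument signature (the helper name is hypothetical):

    /* sketch: recursive totals with the PG14+ stats callback */
    static void
    SumContextTree(MemoryContext context, MemoryContextCounters *counters)
    {
        for (MemoryContext child = context->firstchild;
             child != NULL; child = child->nextchild)
        {
            SumContextTree(child, counters);
        }

        /* NULL printfunc/passthru: only accumulate into counters */
        context->methods->stats(context, NULL, NULL, counters, true);
    }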
@ -503,11 +503,7 @@ UserHasPermissionToViewStatsOf(Oid currentUserId, Oid backendOwnedId)
    }

    if (is_member_of_role(currentUserId,
#if PG_VERSION_NUM >= PG_VERSION_14
                          ROLE_PG_READ_ALL_STATS))
#else
                          DEFAULT_ROLE_READ_ALL_STATS))
#endif
    {
        return true;
    }
@ -664,7 +664,7 @@ IsProcessWaitingForSafeOperations(PGPROC *proc)
        return false;
    }

    if (pgproc_statusflags_compat(proc) & PROC_IS_AUTOVACUUM)
    if (proc->statusFlags & PROC_IS_AUTOVACUUM)
    {
        return true;
    }
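PG14 moved the per-backend vacuum flags from the separate PGXACT array onto PGPROC itself; the compat macro removed later in this diff shows both spellings. A one-line sketch of the now-direct PG14+ read:

    /* sketch: PG13 went through ProcGlobal->allPgXact[...].vacuumFlags;
     * PG14+ reads the flags straight off the PGPROC */
    bool isAutovacuum = (proc->statusFlags & PROC_IS_AUTOVACUUM) != 0;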
@ -1436,13 +1436,11 @@ error_severity(int elevel)
            break;
        }

#if PG_VERSION_NUM >= PG_VERSION_14
        case WARNING_CLIENT_ONLY:
        {
            prefix = gettext_noop("WARNING");
            break;
        }
#endif

        case ERROR:
        {
@ -528,9 +528,9 @@ FixFunctionArgumentsWalker(Node *expr, void *context)
        elog(ERROR, "cache lookup failed for function %u", funcExpr->funcid);
    }

    funcExpr->args = expand_function_arguments_compat(funcExpr->args, false,
                                                      funcExpr->funcresulttype,
                                                      func_tuple);
    funcExpr->args = expand_function_arguments(funcExpr->args, false,
                                               funcExpr->funcresulttype,
                                               func_tuple);

    ReleaseSysCache(func_tuple);
}
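The compat macro existed because PG14 inserted an include_out_arguments boolean as the second parameter of expand_function_arguments; the PG13 branch of the macro simply dropped it. A hedged sketch of the full lookup-and-expand pattern around the PG14+ call, assuming a FuncExpr in scope:

    /* sketch: fetch the pg_proc tuple, then normalize the argument list;
     * false = do not expand procedure OUT arguments */
    HeapTuple funcTuple = SearchSysCache1(PROCOID,
                                          ObjectIdGetDatum(funcExpr->funcid));
    if (!HeapTupleIsValid(funcTuple))
    {
        elog(ERROR, "cache lookup failed for function %u", funcExpr->funcid);
    }

    funcExpr->args = expand_function_arguments(funcExpr->args, false,
                                               funcExpr->funcresulttype,
                                               funcTuple);
    ReleaseSysCache(funcTuple);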
@ -19,11 +19,6 @@
 * done before including libpq.h.
 */
#include "distributed/pg_version_constants.h"
#if PG_VERSION_NUM < PG_VERSION_14
#ifndef OPENSSL_API_COMPAT
#define OPENSSL_API_COMPAT 0x1000100L
#endif
#endif

#include "distributed/connection_management.h"
#include "distributed/memutils.h"
@ -46,7 +46,7 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume
    const bool findVariadics = false;
    const bool findDefaults = false;

    FuncCandidateList functionList = FuncnameGetCandidates_compat(
    FuncCandidateList functionList = FuncnameGetCandidates(
        qualifiedFunctionNameList,
        argumentCount,
        argumentList,
@ -118,9 +118,7 @@ ListToHashSet(List *itemList, Size keySize, bool isStringList)

    if (isStringList)
    {
#if PG_VERSION_NUM >= PG_VERSION_14
        flags |= HASH_STRINGS;
#endif
    }
    else
    {
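Since PG14, dynahash callers that rely on string keys must say so explicitly with HASH_STRINGS (older versions treated string hashing as the implicit default), so the flag can now be set unconditionally. A minimal sketch of creating such a hash table under that rule; the table name and element count are illustrative:

    /* sketch: PG14+ requires an explicit key-hashing strategy flag */
    HASHCTL info = { 0 };
    info.keysize = keySize;
    info.entrysize = keySize;

    int flags = HASH_ELEM | (isStringList ? HASH_STRINGS : HASH_BLOBS);
    HTAB *itemSet = hash_create("ListToHashSet items", 32, &info, flags);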
@ -18,9 +18,7 @@

#include "utils/builtins.h"

#if PG_VERSION_NUM >= PG_VERSION_14
#include "common/cryptohash.h"
#endif


/*
@ -1023,7 +1023,7 @@ IsParentTable(Oid relationId)
Oid
PartitionParentOid(Oid partitionOid)
{
    Oid partitionParentOid = get_partition_parent_compat(partitionOid, false);
    Oid partitionParentOid = get_partition_parent(partitionOid, false);

    return partitionParentOid;
}

@ -1074,7 +1074,7 @@ PartitionList(Oid parentRelationId)

        ereport(ERROR, (errmsg("\"%s\" is not a parent table", relationName)));
    }
    PartitionDesc partDesc = RelationGetPartitionDesc_compat(rel, true);
    PartitionDesc partDesc = RelationGetPartitionDesc(rel, true);
    Assert(partDesc != NULL);

    int partitionCount = partDesc->nparts;

@ -1107,7 +1107,7 @@ GenerateDetachPartitionCommand(Oid partitionTableId)
        ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName)));
    }

    Oid parentId = get_partition_parent_compat(partitionTableId, false);
    Oid parentId = get_partition_parent(partitionTableId, false);
    char *tableQualifiedName = generate_qualified_relation_name(partitionTableId);
    char *parentTableQualifiedName = generate_qualified_relation_name(parentId);

@ -1221,7 +1221,7 @@ GenerateAlterTableAttachPartitionCommand(Oid partitionTableId)
        ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName)));
    }

    Oid parentId = get_partition_parent_compat(partitionTableId, false);
    Oid parentId = get_partition_parent(partitionTableId, false);
    char *tableQualifiedName = generate_qualified_relation_name(partitionTableId);
    char *parentTableQualifiedName = generate_qualified_relation_name(parentId);
@ -22,29 +22,6 @@
    ExecARDeleteTriggers(a, b, c, d, e)
#endif

#if PG_VERSION_NUM >= PG_VERSION_14
#define ColumnarProcessUtility_compat(a, b, c, d, e, f, g, h) \
    ColumnarProcessUtility(a, b, c, d, e, f, g, h)
#define PrevProcessUtilityHook_compat(a, b, c, d, e, f, g, h) \
    PrevProcessUtilityHook(a, b, c, d, e, f, g, h)
#define GetOldestNonRemovableTransactionId_compat(a, b) \
    GetOldestNonRemovableTransactionId(a)
#define ExecSimpleRelationInsert_compat(a, b, c) \
    ExecSimpleRelationInsert(a, b, c)
#define index_insert_compat(a, b, c, d, e, f, g, h) \
    index_insert(a, b, c, d, e, f, g, h)
#else
#define ColumnarProcessUtility_compat(a, b, c, d, e, f, g, h) \
    ColumnarProcessUtility(a, b, d, e, f, g, h)
#define PrevProcessUtilityHook_compat(a, b, c, d, e, f, g, h) \
    PrevProcessUtilityHook(a, b, d, e, f, g, h)
#define GetOldestNonRemovableTransactionId_compat(a, b) GetOldestXmin(a, b)
#define ExecSimpleRelationInsert_compat(a, b, c) \
    ExecSimpleRelationInsert(b, c)
#define index_insert_compat(a, b, c, d, e, f, g, h) \
    index_insert(a, b, c, d, e, f, h)
#endif

#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE

#define ExplainPropertyLong(qlabel, value, es) \
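All of these compat macros follow one idiom: call sites always pass the widest (PG14) argument list, and the PG13 branch silently drops whatever PG13 did not have (the readOnlyTree flag for the utility hooks, the EState for ExecSimpleRelationInsert, and so on). A hedged sketch of the idiom itself, with a hypothetical MyHook standing in for any of the real functions:

    /* sketch of the pattern these macros implement: one call site, two ABIs */
    #if PG_VERSION_NUM >= PG_VERSION_14
    #define MyHook_compat(a, b, c) MyHook(a, b, c)   /* c exists on PG14+ */
    #else
    #define MyHook_compat(a, b, c) MyHook(a, b)      /* PG13: drop c */
    #endif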
@ -31,12 +31,7 @@
typedef enum CitusCopyDest
{
    COPY_FILE,     /* to/from file (or a piped program) */
#if PG_VERSION_NUM >= PG_VERSION_14
    COPY_FRONTEND, /* to frontend */
#else
    COPY_OLD_FE,   /* to/from frontend (2.0 protocol) */
    COPY_NEW_FE,   /* to/from frontend (3.0 protocol) */
#endif
    COPY_CALLBACK  /* to/from callback function */
} CitusCopyDest;
@ -79,9 +79,7 @@ typedef struct DDLJob
extern ProcessUtility_hook_type PrevProcessUtility;

extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
#if PG_VERSION_NUM >= PG_VERSION_14
                                 bool readOnlyTree,
#endif
                                 ProcessUtilityContext context, ParamListInfo params,
                                 struct QueryEnvironment *queryEnv, DestReceiver *dest,
                                 QueryCompletion *completionTag
@ -353,7 +353,4 @@ extern bool CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events,
extern double MillisecondsPassedSince(instr_time moment);
extern long MillisecondsToTimeout(instr_time start, long msAfterStart);

#if PG_VERSION_NUM < 140000
extern void WarmUpConnParamsHash(void);
#endif

#endif /* CONNECTION_MANAGMENT_H */
@ -11,7 +11,6 @@
#ifndef PG_VERSION_CONSTANTS
#define PG_VERSION_CONSTANTS

#define PG_VERSION_13 130000
#define PG_VERSION_14 140000
#define PG_VERSION_15 150000
#define PG_VERSION_16 160000
@ -61,8 +61,7 @@ pg_strtoint64(char *s)
 * We want to use it in all versions. So we backport it ourselves in earlier
 * versions, and rely on the Postgres provided version in the later versions.
 */
#if PG_VERSION_NUM >= PG_VERSION_13 && PG_VERSION_NUM < 130010 \
    || PG_VERSION_NUM >= PG_VERSION_14 && PG_VERSION_NUM < 140007
#if PG_VERSION_NUM < 140007
static inline SMgrRelation
RelationGetSmgr(Relation rel)
{

@ -84,67 +83,6 @@ RelationGetSmgr(Relation rel)

#endif

#if PG_VERSION_NUM >= PG_VERSION_14
#define AlterTableStmtObjType_compat(a) ((a)->objtype)
#define getObjectTypeDescription_compat(a, b) getObjectTypeDescription(a, b)
#define getObjectIdentity_compat(a, b) getObjectIdentity(a, b)

/* for MemoryContextMethods->stats */
#define stats_compat(a, b, c, d, e) stats(a, b, c, d, e)
#define FuncnameGetCandidates_compat(a, b, c, d, e, f, g) \
    FuncnameGetCandidates(a, b, c, d, e, f, g)
#define expand_function_arguments_compat(a, b, c, d) expand_function_arguments(a, b, c, d)
#define BeginCopyFrom_compat(a, b, c, d, e, f, g, h) BeginCopyFrom(a, b, c, d, e, f, g, h)
#define standard_ProcessUtility_compat(a, b, c, d, e, f, g, h) \
    standard_ProcessUtility(a, b, c, d, e, f, g, h)
#define ProcessUtility_compat(a, b, c, d, e, f, g, h) \
    ProcessUtility(a, b, c, d, e, f, g, h)
#define PrevProcessUtility_compat(a, b, c, d, e, f, g, h) \
    PrevProcessUtility(a, b, c, d, e, f, g, h)
#define SetTuplestoreDestReceiverParams_compat(a, b, c, d, e, f) \
    SetTuplestoreDestReceiverParams(a, b, c, d, e, f)
#define pgproc_statusflags_compat(pgproc) ((pgproc)->statusFlags)
#define get_partition_parent_compat(a, b) get_partition_parent(a, b)
#define RelationGetPartitionDesc_compat(a, b) RelationGetPartitionDesc(a, b)
#define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(a, b)
#define pull_varnos_compat(a, b) pull_varnos(a, b)
#else
#define AlterTableStmtObjType_compat(a) ((a)->relkind)
#define F_NEXTVAL F_NEXTVAL_OID
#define ROLE_PG_MONITOR DEFAULT_ROLE_MONITOR
#define PROC_WAIT_STATUS_WAITING STATUS_WAITING
#define getObjectTypeDescription_compat(a, b) getObjectTypeDescription(a)
#define getObjectIdentity_compat(a, b) getObjectIdentity(a)

/* for MemoryContextMethods->stats */
#define stats_compat(a, b, c, d, e) stats(a, b, c, d)
#define FuncnameGetCandidates_compat(a, b, c, d, e, f, g) \
    FuncnameGetCandidates(a, b, c, d, e, g)
#define expand_function_arguments_compat(a, b, c, d) expand_function_arguments(a, c, d)
#define VacOptValue VacOptTernaryValue
#define VACOPTVALUE_UNSPECIFIED VACOPT_TERNARY_DEFAULT
#define VACOPTVALUE_DISABLED VACOPT_TERNARY_DISABLED
#define VACOPTVALUE_ENABLED VACOPT_TERNARY_ENABLED
#define CopyFromState CopyState
#define BeginCopyFrom_compat(a, b, c, d, e, f, g, h) BeginCopyFrom(a, b, d, e, f, g, h)
#define standard_ProcessUtility_compat(a, b, c, d, e, f, g, h) \
    standard_ProcessUtility(a, b, d, e, f, g, h)
#define ProcessUtility_compat(a, b, c, d, e, f, g, h) ProcessUtility(a, b, d, e, f, g, h)
#define PrevProcessUtility_compat(a, b, c, d, e, f, g, h) \
    PrevProcessUtility(a, b, d, e, f, g, h)
#define COPY_FRONTEND COPY_NEW_FE
#define SetTuplestoreDestReceiverParams_compat(a, b, c, d, e, f) \
    SetTuplestoreDestReceiverParams(a, b, c, d)
#define pgproc_statusflags_compat(pgproc) \
    ((&ProcGlobal->allPgXact[(pgproc)->pgprocno])->vacuumFlags)
#define get_partition_parent_compat(a, b) get_partition_parent(a)
#define RelationGetPartitionDesc_compat(a, b) RelationGetPartitionDesc(a)
#define PQ_LARGE_MESSAGE_LIMIT 0
#define make_simple_restrictinfo_compat(a, b) make_simple_restrictinfo(b)
#define pull_varnos_compat(a, b) pull_varnos(b)
#define ROLE_PG_READ_ALL_STATS DEFAULT_ROLE_READ_ALL_STATS
#endif

#define SetListCellPtr(a, b) ((a)->ptr_value = (b))
#define RangeTableEntryFromNSItem(a) ((a)->p_rte)
#define fcGetArgValue(fc, n) ((fc)->args[n].value)
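RelationGetSmgr only landed upstream in the 13.10/14.7 minor releases, so Citus keeps carrying its own copy on older PG14 minors (the version check above now only needs the PG14 bound). The helper's body is elided by the hunk; a sketch of what the upstream helper looks like, offered as an assumption rather than the verbatim backport:

    /* sketch of the backported helper (assumed body, per the upstream one) */
    static inline SMgrRelation
    RelationGetSmgr(Relation rel)
    {
        if (unlikely(rel->rd_smgr == NULL))
        {
            smgrsetowner(&(rel->rd_smgr), smgropen(rel->rd_node, rel->rd_backend));
        }
        return rel->rd_smgr;
    }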
@ -98,34 +98,7 @@ s/of relation ".*" violates not-null constraint/violates not-null constraint/g
s/partition ".*" would be violated by some row/partition would be violated by some row/g
s/of relation ".*" contains null values/contains null values/g

#if (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14)
# (This is not preprocessor directive, but a reminder for the developer that will drop PG13 support )
# libpq message changes for minor versions of pg13

# We ignore multiline error messages, and substitute first line with a single line
# alternative that is used in some older libpq versions.
s/(ERROR: |WARNING: |error:) server closed the connection unexpectedly/\1 connection not open/g
/^\s*This probably means the server terminated abnormally$/d
/^\s*before or while processing the request.$/d
/^\s*connection not open$/d

s/ERROR: fake_fetch_row_version not implemented/ERROR: fake_tuple_update not implemented/g
s/ERROR: COMMIT is not allowed in an SQL function/ERROR: COMMIT is not allowed in a SQL function/g
s/ERROR: ROLLBACK is not allowed in an SQL function/ERROR: ROLLBACK is not allowed in a SQL function/g
/.*Async-Capable.*/d
/.*Async Capable.*/d
/Parent Relationship/d
/Parent-Relationship/d
s/function array_cat_agg\(anyarray\) anyarray/function array_cat_agg\(anycompatiblearray\) anycompatiblearray/g
s/function array_cat_agg\(anyarray\)/function array_cat_agg\(anycompatiblearray\)/g
s/TRIM\(BOTH FROM value\)/btrim\(value\)/g
/DETAIL: Subqueries are not supported in policies on distributed tables/d
s/ERROR: unexpected non-SELECT command in SubLink/ERROR: cannot create policy/g

# PG13 changes bgworker sigterm message, we can drop that line with PG13 drop
s/(FATAL: terminating).*Citus Background Task Queue Executor.*(due to administrator command)\+/\1 connection \2 \+/g

#endif /* (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) */
s/(Citus Background Task Queue Executor: regression\/postgres for \()[0-9]+\/[0-9]+\)/\1xxxxx\/xxxxx\)/g

# Changed outputs after minor bump to PG14.5 and PG13.8
s/(ERROR: |WARNING: |error:) invalid socket/\1 connection not open/g

@ -135,9 +108,18 @@ s/(ERROR: |WARNING: |error:) invalid socket/\1 connection not open/g

# pg15 changes
# can be removed when dropping PG13&14 support
#if (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM < PG_VERSION_15)
# (This is not preprocessor directive, but a reminder for the developer that will drop PG14 support )
s/is not a PostgreSQL server process/is not a PostgreSQL backend process/g
s/ AS "\?column\?"//g
s/".*\.(.*)": (found .* removable)/"\1": \2/g
# We ignore multiline error messages, and substitute first line with a single line
# alternative that is used in some older libpq versions.
s/(ERROR: |WARNING: |error:) server closed the connection unexpectedly/\1 connection not open/g
/^\s*This probably means the server terminated abnormally$/d
/^\s*before or while processing the request.$/d
/^\s*connection not open$/d
#endif /* (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM < PG_VERSION_15) */

# intermediate_results
s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g
@ -166,6 +166,7 @@ DEPS = {
            "multi_table_ddl",
        ],
    ),
    "grant_on_schema_propagation": TestDeps("minimal_schedule"),
}

@ -495,11 +495,11 @@ SELECT task_id, status, retry_count, message FROM pg_dist_background_task
        ORDER BY task_id; -- show that all tasks are runnable by retry policy after termination signal
 task_id | status | retry_count | message
---------------------------------------------------------------------
 1450019 | runnable | 1 | FATAL: terminating connection due to administrator command +
         |          |   | CONTEXT: Citus Background Task Queue Executor: regression/postgres for (1450011/1450019) +
 1450019 | runnable | 1 | FATAL: terminating background worker "Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx)" due to administrator command+
         |          |   | CONTEXT: Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx) +
         |          |   |
 1450020 | runnable | 1 | FATAL: terminating connection due to administrator command +
         |          |   | CONTEXT: Citus Background Task Queue Executor: regression/postgres for (1450012/1450020) +
 1450020 | runnable | 1 | FATAL: terminating background worker "Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx)" due to administrator command+
         |          |   | CONTEXT: Citus Background Task Queue Executor: regression/postgres for (xxxxx/xxxxx) +
         |          |   |
(2 rows)

@ -85,17 +85,14 @@ SET search_path TO cpu_priority;
-- in their CREATE SUBSCRIPTION commands.
SET citus.log_remote_commands TO ON;
SET citus.grep_remote_commands = '%CREATE SUBSCRIPTION%';
-- We disable binary protocol, so we have consistent output between PG13 and
-- PG14, beacuse PG13 doesn't support binary logical replication.
SET citus.enable_binary_protocol = false;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 master_move_shard_placement
---------------------------------------------------------------------

@ -104,13 +101,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx

SET citus.cpu_priority_for_logical_replication_senders = 15;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 master_move_shard_placement
---------------------------------------------------------------------

@ -119,13 +116,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx

SET citus.max_high_priority_background_processes = 3;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_move_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_move_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_move_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 master_move_shard_placement
---------------------------------------------------------------------

@ -145,21 +142,21 @@ SELECT pg_catalog.citus_split_shard_by_split_points(
    ARRAY['-1500000000'],
    ARRAY[:worker_1_node, :worker_2_node],
    'force_logical');
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx)
NOTICE: issuing CREATE SUBSCRIPTION citus_shard_split_subscription_xxxxxxx_xxxxxxx CONNECTION 'host=''localhost'' port=xxxxx user=''postgres'' dbname=''regression'' connect_timeout=20' PUBLICATION citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx WITH (citus_use_authinfo=true, create_slot=false, copy_data=false, enabled=false, slot_name=citus_shard_split_slot_xxxxxxx_xxxxxxx_xxxxxxx, binary=true)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 citus_split_shard_by_split_points
---------------------------------------------------------------------
@ -1,11 +1,3 @@
-- This test file has an alternative output because of error messages vary for PG13
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13;
 server_version_le_13
---------------------------------------------------------------------
 f
(1 row)

CREATE SCHEMA generated_identities;
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
@ -1,431 +0,0 @@
|
|||
-- This test file has an alternative output because of error messages vary for PG13
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13;
|
||||
server_version_le_13
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA generated_identities;
|
||||
SET search_path TO generated_identities;
|
||||
SET client_min_messages to ERROR;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT 1 from citus_add_node('localhost', :master_port, groupId=>0);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- smallint identity column can not be distributed
|
||||
CREATE TABLE smallint_identity_column (
|
||||
a smallint GENERATED BY DEFAULT AS IDENTITY
|
||||
);
|
||||
SELECT create_distributed_table('smallint_identity_column', 'a');
|
||||
ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column
|
||||
HINT: Use bigint identity column instead.
|
||||
SELECT create_distributed_table_concurrently('smallint_identity_column', 'a');
|
||||
ERROR: cannot complete operation on generated_identities.smallint_identity_column with smallint/int identity column
|
||||
HINT: Use bigint identity column instead.
|
||||
SELECT create_reference_table('smallint_identity_column');
|
||||
ERROR: cannot complete operation on a table with identity column
|
||||
SELECT citus_add_local_table_to_metadata('smallint_identity_column');
|
||||
citus_add_local_table_to_metadata
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE smallint_identity_column;
|
||||
-- int identity column can not be distributed
|
||||
CREATE TABLE int_identity_column (
|
||||
a int GENERATED BY DEFAULT AS IDENTITY
|
||||
);
|
||||
SELECT create_distributed_table('int_identity_column', 'a');
|
||||
ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column
|
||||
HINT: Use bigint identity column instead.
|
||||
SELECT create_distributed_table_concurrently('int_identity_column', 'a');
|
||||
ERROR: cannot complete operation on generated_identities.int_identity_column with smallint/int identity column
|
||||
HINT: Use bigint identity column instead.
|
||||
SELECT create_reference_table('int_identity_column');
|
||||
ERROR: cannot complete operation on a table with identity column
|
||||
SELECT citus_add_local_table_to_metadata('int_identity_column');
|
||||
citus_add_local_table_to_metadata
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE int_identity_column;
|
||||
RESET citus.shard_replication_factor;
|
||||
CREATE TABLE bigint_identity_column (
|
||||
a bigint GENERATED BY DEFAULT AS IDENTITY,
|
||||
b int
|
||||
);
|
||||
SELECT citus_add_local_table_to_metadata('bigint_identity_column');
|
||||
citus_add_local_table_to_metadata
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE bigint_identity_column;
|
||||
CREATE TABLE bigint_identity_column (
|
||||
a bigint GENERATED BY DEFAULT AS IDENTITY,
|
||||
b int
|
||||
);
|
||||
SELECT create_distributed_table('bigint_identity_column', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\d bigint_identity_column
|
||||
Table "generated_identities.bigint_identity_column"
|
||||
Column | Type | Collation | Nullable | Default
|
||||
---------------------------------------------------------------------
|
||||
a | bigint | | not null | generated by default as identity
|
||||
b | integer | | |
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SET search_path TO generated_identities;
|
||||
SET client_min_messages to ERROR;
|
||||
INSERT INTO bigint_identity_column (b)
|
||||
SELECT s FROM generate_series(1,10) s;
|
||||
\d generated_identities.bigint_identity_column
|
||||
Table "generated_identities.bigint_identity_column"
|
||||
Column | Type | Collation | Nullable | Default
|
||||
---------------------------------------------------------------------
|
||||
a | bigint | | not null | generated by default as identity
|
||||
b | integer | | |
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO generated_identities;
|
||||
SET client_min_messages to ERROR;
|
||||
INSERT INTO bigint_identity_column (b)
|
||||
SELECT s FROM generate_series(11,20) s;
|
||||
SELECT * FROM bigint_identity_column ORDER BY B ASC;
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
3940649673949185 | 1
|
||||
3940649673949186 | 2
|
||||
3940649673949187 | 3
|
||||
3940649673949188 | 4
|
||||
3940649673949189 | 5
|
||||
3940649673949190 | 6
|
||||
3940649673949191 | 7
|
||||
3940649673949192 | 8
|
||||
3940649673949193 | 9
|
||||
3940649673949194 | 10
|
||||
1 | 11
|
||||
2 | 12
|
||||
3 | 13
|
||||
4 | 14
|
||||
5 | 15
|
||||
6 | 16
|
||||
7 | 17
|
||||
8 | 18
|
||||
9 | 19
|
||||
10 | 20
|
||||
(20 rows)
|
||||
|
||||
-- table with identity column cannot be altered.
|
||||
SELECT alter_distributed_table('bigint_identity_column', 'b');
|
||||
ERROR: cannot complete operation on a table with identity column
|
||||
-- table with identity column cannot be undistributed.
|
||||
SELECT undistribute_table('bigint_identity_column');
|
||||
ERROR: cannot complete operation on a table with identity column
|
||||
DROP TABLE bigint_identity_column;
|
||||
-- create a partitioned table for testing.
|
||||
CREATE TABLE partitioned_table (
|
||||
a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
|
||||
b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10),
|
||||
c int
|
||||
)
|
||||
PARTITION BY RANGE (c);
|
||||
CREATE TABLE partitioned_table_1_50 PARTITION OF partitioned_table FOR VALUES FROM (1) TO (50);
|
||||
CREATE TABLE partitioned_table_50_500 PARTITION OF partitioned_table FOR VALUES FROM (50) TO (1000);
|
||||
SELECT create_distributed_table('partitioned_table', 'a');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\d partitioned_table
|
||||
Partitioned table "generated_identities.partitioned_table"
|
||||
Column | Type | Collation | Nullable | Default
|
||||
---------------------------------------------------------------------
|
||||
a | bigint | | not null | generated by default as identity
|
||||
b | bigint | | not null | generated always as identity
|
||||
c | integer | | |
|
||||
Partition key: RANGE (c)
|
||||
Number of partitions: 2 (Use \d+ to list them.)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SET search_path TO generated_identities;
|
||||
SET client_min_messages to ERROR;
|
||||
\d generated_identities.partitioned_table
|
||||
Partitioned table "generated_identities.partitioned_table"
|
||||
Column | Type | Collation | Nullable | Default
|
||||
---------------------------------------------------------------------
|
||||
a | bigint | | not null | generated by default as identity
|
||||
b | bigint | | not null | generated always as identity
|
||||
c | integer | | |
|
||||
Partition key: RANGE (c)
|
||||
Number of partitions: 2 (Use \d+ to list them.)
|
||||
|
||||
insert into partitioned_table (c) values (1);
|
||||
insert into partitioned_table (c) SELECT 2;
|
||||
INSERT INTO partitioned_table (c)
|
||||
SELECT s FROM generate_series(3,7) s;
|
||||
\c - - - :master_port
|
||||
SET search_path TO generated_identities;
|
||||
SET client_min_messages to ERROR;
|
||||
INSERT INTO partitioned_table (c)
|
||||
SELECT s FROM generate_series(10,20) s;
|
||||
INSERT INTO partitioned_table (a,c) VALUES (998,998);
|
||||
INSERT INTO partitioned_table (a,b,c) OVERRIDING SYSTEM VALUE VALUES (999,999,999);
|
||||
SELECT * FROM partitioned_table ORDER BY c ASC;
|
||||
a | b | c
|
||||
---------------------------------------------------------------------
|
||||
3940649673949185 | 3940649673949185 | 1
|
||||
3940649673949195 | 3940649673949195 | 2
|
||||
3940649673949205 | 3940649673949205 | 3
|
||||
3940649673949215 | 3940649673949215 | 4
|
||||
3940649673949225 | 3940649673949225 | 5
|
||||
3940649673949235 | 3940649673949235 | 6
|
||||
3940649673949245 | 3940649673949245 | 7
|
||||
10 | 10 | 10
|
||||
20 | 20 | 11
|
||||
30 | 30 | 12
|
||||
40 | 40 | 13
|
||||
50 | 50 | 14
|
||||
60 | 60 | 15
|
||||
70 | 70 | 16
|
||||
80 | 80 | 17
|
||||
90 | 90 | 18
|
||||
100 | 100 | 19
|
||||
110 | 110 | 20
|
||||
998 | 120 | 998
|
||||
999 | 999 | 999
|
||||
(20 rows)
|
||||
|
||||
-- alter table .. alter column .. add is unsupported
|
||||
ALTER TABLE partitioned_table ALTER COLUMN g ADD GENERATED ALWAYS AS IDENTITY;
|
||||
ERROR: alter table command is currently unsupported
|
||||
DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
|
||||
-- alter table .. alter column is unsupported
|
||||
ALTER TABLE partitioned_table ALTER COLUMN b TYPE int;
|
||||
ERROR: cannot execute ALTER COLUMN command involving identity column
|
||||
DROP TABLE partitioned_table;
|
||||
-- create a table for reference table testing.
|
||||
CREATE TABLE reference_table (
|
||||
a bigint CONSTRAINT myconname GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
|
||||
b bigint GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 10) UNIQUE,
|
||||
c int
|
||||
);
|
||||
SELECT create_reference_table('reference_table');
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
\d reference_table
|
||||
Table "generated_identities.reference_table"
|
||||
Column | Type | Collation | Nullable | Default
|
||||
---------------------------------------------------------------------
|
||||
a | bigint | | not null | generated by default as identity
|
||||
b | bigint | | not null | generated always as identity
|
||||
c | integer | | |
|
||||
Indexes:
|
||||
"reference_table_b_key" UNIQUE CONSTRAINT, btree (b)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SET search_path TO generated_identities;
|
||||
\d generated_identities.reference_table
|
||||
Table "generated_identities.reference_table"
|
||||
Column | Type | Collation | Nullable | Default
|
||||
---------------------------------------------------------------------
|
||||
a | bigint | | not null | generated by default as identity
|
||||
b | bigint | | not null | generated always as identity
|
||||
c | integer | | |
|
||||
Indexes:
|
||||
"reference_table_b_key" UNIQUE CONSTRAINT, btree (b)
|
||||
|
||||
INSERT INTO reference_table (c)
|
||||
SELECT s FROM generate_series(1,10) s;
|
||||
--on master
|
||||
select * from reference_table;
|
||||
a | b | c
|
||||
---------------------------------------------------------------------
|
||||
3940649673949185 | 3940649673949185 | 1
|
||||
3940649673949195 | 3940649673949195 | 2
|
||||
3940649673949205 | 3940649673949205 | 3
|
||||
3940649673949215 | 3940649673949215 | 4
|
||||
3940649673949225 | 3940649673949225 | 5
|
||||
3940649673949235 | 3940649673949235 | 6
|
||||
3940649673949245 | 3940649673949245 | 7
|
||||
3940649673949255 | 3940649673949255 | 8
|
||||
3940649673949265 | 3940649673949265 | 9
|
||||
3940649673949275 | 3940649673949275 | 10
|
||||
(10 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
SET search_path TO generated_identities;
|
||||
SET client_min_messages to ERROR;
|
||||
INSERT INTO reference_table (c)
|
||||
SELECT s FROM generate_series(11,20) s;
|
||||
SELECT * FROM reference_table ORDER BY c ASC;
|
||||
a | b | c
|
||||
---------------------------------------------------------------------
|
||||
3940649673949185 | 3940649673949185 | 1
|
||||
3940649673949195 | 3940649673949195 | 2
|
||||
3940649673949205 | 3940649673949205 | 3
|
||||
3940649673949215 | 3940649673949215 | 4
|
||||
3940649673949225 | 3940649673949225 | 5
|
||||
3940649673949235 | 3940649673949235 | 6
|
||||
3940649673949245 | 3940649673949245 | 7
|
||||
3940649673949255 | 3940649673949255 | 8
|
||||
3940649673949265 | 3940649673949265 | 9
|
||||
3940649673949275 | 3940649673949275 | 10
|
||||
10 | 10 | 11
|
||||
20 | 20 | 12
|
||||
30 | 30 | 13
40 | 40 | 14
50 | 50 | 15
60 | 60 | 16
70 | 70 | 17
80 | 80 | 18
90 | 90 | 19
100 | 100 | 20
(20 rows)

DROP TABLE reference_table;
CREATE TABLE color (
color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
color_name VARCHAR NOT NULL
);
-- https://github.com/citusdata/citus/issues/6694
CREATE USER identity_test_user;
GRANT INSERT ON color TO identity_test_user;
GRANT USAGE ON SCHEMA generated_identities TO identity_test_user;
SET ROLE identity_test_user;
SELECT create_distributed_table('color', 'color_id');
ERROR: must be owner of table color
SET ROLE postgres;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table_concurrently('color', 'color_id');
create_distributed_table_concurrently
---------------------------------------------------------------------

(1 row)

RESET citus.shard_replication_factor;
\c - identity_test_user - :worker_1_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
INSERT INTO color(color_name) VALUES ('Blue');
\c - postgres - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
SET citus.next_shard_id TO 12400000;
DROP TABLE Color;
CREATE TABLE color (
color_id BIGINT GENERATED ALWAYS AS IDENTITY UNIQUE,
color_name VARCHAR NOT NULL
) USING columnar;
SELECT create_distributed_table('color', 'color_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO color(color_name) VALUES ('Blue');
\d+ color
Table "generated_identities.color"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
color_id | bigint | | not null | generated always as identity | plain | |
color_name | character varying | | not null | | extended | |
Indexes:
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)

\c - - - :worker_1_port
SET search_path TO generated_identities;
\d+ color
Table "generated_identities.color"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
color_id | bigint | | not null | generated always as identity | plain | |
color_name | character varying | | not null | | extended | |
Indexes:
"color_color_id_key" UNIQUE CONSTRAINT, btree (color_id)

INSERT INTO color(color_name) VALUES ('Red');
-- alter sequence .. restart
ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
ERROR: Altering a distributed sequence is currently not supported.
-- override system value
INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000"
DETAIL: Key (color_id)=(1) already exists.
CONTEXT: while executing command on localhost:xxxxx
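A GENERATED ALWAYS identity column only accepts an explicit value when the INSERT opts in with OVERRIDING SYSTEM VALUE, and the supplied value must still satisfy the UNIQUE constraint, which is why the override above then fails on the duplicate key. A sketch of a non-conflicting override (the value 1000 is illustrative, not part of the test):

INSERT INTO color (color_id, color_name)
OVERRIDING SYSTEM VALUE
VALUES (1000, 'Green'); -- explicit value chosen to avoid colliding with generated ones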
-- update null or custom value
UPDATE color SET color_id = NULL;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
UPDATE color SET color_id = 1;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
\c - postgres - :master_port
SET search_path TO generated_identities;
SET client_min_messages to ERROR;
-- alter table .. add column .. GENERATED .. AS IDENTITY
ALTER TABLE color ADD COLUMN color_id BIGINT GENERATED ALWAYS AS IDENTITY;
ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers
-- alter sequence .. restart
ALTER SEQUENCE color_color_id_seq RESTART WITH 1000;
ERROR: Altering a distributed sequence is currently not supported.
-- override system value
INSERT INTO color(color_id, color_name) VALUES (1, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) VALUES (NULL, 'Red');
ERROR: cannot insert into column "color_id"
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
HINT: Use OVERRIDING SYSTEM VALUE to override.
INSERT INTO color(color_id, color_name) OVERRIDING SYSTEM VALUE VALUES (1, 'Red');
ERROR: duplicate key value violates unique constraint "color_color_id_key_12400000"
DETAIL: Key (color_id)=(1) already exists.
CONTEXT: while executing command on localhost:xxxxx
-- update null or custom value
UPDATE color SET color_id = NULL;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
UPDATE color SET color_id = 1;
ERROR: column "color_id" can only be updated to DEFAULT
DETAIL: Column "color_id" is an identity column defined as GENERATED ALWAYS.
DROP TABLE IF EXISTS test;
CREATE TABLE test (x int, y int, z bigint generated by default as identity);
SELECT create_distributed_table('test', 'x', colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO test VALUES (1,2);
INSERT INTO test SELECT x, y FROM test WHERE x = 1;
SELECT * FROM test;
x | y | z
---------------------------------------------------------------------
1 | 2 | 1
1 | 2 | 2
(2 rows)

DROP SCHEMA generated_identities CASCADE;
DROP USER identity_test_user;
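Unlike the GENERATED ALWAYS column on color, the z column on test was declared GENERATED BY DEFAULT, so an explicit value needs no OVERRIDING clause and omitted values still come from the identity sequence. A minimal sketch (values illustrative; the table is already dropped with the schema above):

INSERT INTO test (x, y, z) VALUES (3, 4, 100); -- explicit value accepted for a BY DEFAULT identity
INSERT INTO test (x, y) VALUES (5, 6);         -- z is drawn from the identity sequence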
@ -1,7 +1,7 @@
--
-- GRANT_ON_SCHEMA_PROPAGATION
--
-- this test has different output for PG13/14 compared to PG15
-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset

@ -327,6 +327,8 @@ SELECT master_remove_node('localhost', :worker_2_port);

(1 row)

-- to avoid different output in PG15
GRANT CREATE ON SCHEMA public TO public;
-- distribute the public schema (it has to be distributed by now but just in case)
CREATE TABLE public_schema_table (id INT);
SELECT create_distributed_table('public_schema_table', 'id');

@ -1,7 +1,7 @@
--
-- GRANT_ON_SCHEMA_PROPAGATION
--
-- this test has different output for PG13/14 compared to PG15
-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset

@ -327,6 +327,8 @@ SELECT master_remove_node('localhost', :worker_2_port);

(1 row)

-- to avoid different output in PG15
GRANT CREATE ON SCHEMA public TO public;
-- distribute the public schema (it has to be distributed by now but just in case)
CREATE TABLE public_schema_table (id INT);
SELECT create_distributed_table('public_schema_table', 'id');
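The PG15 ownership difference called out in the header comments can be confirmed directly on a server; a hedged sketch (not part of the test):

-- reports pg_database_owner on PG15, typically the bootstrap superuser on PG14
SELECT nspname, pg_get_userbyid(nspowner) AS schema_owner
FROM pg_namespace
WHERE nspname = 'public';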
@ -1,66 +0,0 @@
Parsed test spec with 2 sessions

starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1:
-- update a specific node by address
SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
<waiting ...>
step s1-abort: ABORT;
step s2-update-node-1: <... completed>
master_update_node
---------------------------------------------------------------------

(1 row)

step s2-abort: ABORT;
master_remove_node
---------------------------------------------------------------------


(2 rows)


starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-begin: BEGIN;
step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1-force:
-- update a specific node by address (force)
SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
<waiting ...>
step s2-update-node-1-force: <... completed>
master_update_node
---------------------------------------------------------------------

(1 row)

step s2-abort: ABORT;
step s1-abort: ABORT;
FATAL: terminating connection due to administrator command
server closed the connection unexpectedly

master_remove_node
---------------------------------------------------------------------


(2 rows)
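The @ -1,66 +0,0 hunk removes this alternative expected file outright. In the forced permutation, master_update_node(..., force => true, lock_cooldown => 100) stops waiting for conflicting locks after the cooldown and terminates the backends holding them, which is why session s1 sees its connection killed. A hedged follow-up query (not part of the spec) to confirm the metadata change afterwards:

-- expect the updated row to show the original port + 10
SELECT nodeid, nodename, nodeport
FROM pg_dist_node
ORDER BY nodeid;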
@ -1200,7 +1200,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1209,7 +1209,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1218,7 +1218,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1227,7 +1227,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1236,7 +1236,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1245,7 +1245,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1254,7 +1254,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1263,7 +1263,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
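Every hunk in this file (and in the alternative expected outputs that follow) makes the same one-line change: PostgreSQL 14 and later remember that the query spelled the call with SQL-standard TRIM syntax and deparse it back the same way, whereas PG13 always flattened it to btrim(value), so the expected NOTICE text now carries the TRIM spelling. The two spellings invoke the same function, which is easy to check (a sketch, not part of the test):

SELECT btrim('  12  ') AS via_btrim,
       TRIM(BOTH FROM '  12  ') AS via_trim; -- both return '12'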
@ -1200,7 +1200,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1209,7 +1209,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1218,7 +1218,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1227,7 +1227,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1236,7 +1236,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1245,7 +1245,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1254,7 +1254,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1263,7 +1263,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1187,7 +1187,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1196,7 +1196,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1205,7 +1205,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1214,7 +1214,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1223,7 +1223,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1232,7 +1232,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1241,7 +1241,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1250,7 +1250,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
@ -1187,7 +1187,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1196,7 +1196,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1205,7 +1205,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1214,7 +1214,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1223,7 +1223,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1232,7 +1232,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1241,7 +1241,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12

@ -1250,7 +1250,7 @@ NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FR
EXECUTE local_prepare_no_param_subquery;
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT worker_column_1 AS value FROM (SELECT distributed_table.value AS worker_column_1 FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE ((distributed_table.key OPERATOR(pg_catalog.=) ANY (ARRAY[1, 6, 500, 701])) AND (((SELECT 2))::double precision OPERATOR(pg_catalog.>) random()))) worker_subquery ORDER BY worker_column_1 LIMIT '2'::bigint
NOTICE: executing the command locally: SELECT DISTINCT btrim(value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
NOTICE: executing the command locally: SELECT DISTINCT TRIM(BOTH FROM value) AS btrim FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text)) t
btrim
---------------------------------------------------------------------
12
|
|
@ -538,6 +538,7 @@ CREATE POLICY fp_s ON information FOR SELECT
-- this attempt for distribution fails because the table has a disallowed expression
SELECT create_distributed_table('information', 'group_id');
ERROR: cannot create policy
DETAIL: Subqueries are not supported in policies on distributed tables
-- DROP the expression so we can distribute the table
DROP POLICY fp_s ON information;
SELECT create_distributed_table('information', 'group_id');
@ -549,7 +550,7 @@ SELECT create_distributed_table('information', 'group_id');
-- Try and create the expression on a distributed table, this should also fail
CREATE POLICY fp_s ON information FOR SELECT
USING (group_id <= (SELECT group_id FROM users WHERE user_name = current_user));
ERROR: cannot create policy
ERROR: unexpected non-SELECT command in SubLink
-- Clean up test
DROP TABLE information, groups, users;
SET citus.next_shard_id TO 1810000;

@ -98,19 +98,24 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Plan": {
"Node Type": "Sort",
"Parallel Aware": false,
"Async Capable": false,
"Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Aggregate",
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Group Key": ["remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
"Distributed Query": {
"Job": {
"Task Count": 2,
@ -126,11 +131,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
"Group Key": ["l_quantity"],
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "lineitem_360000",
"Alias": "lineitem"
}
@ -172,6 +180,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Plan>
<Node-Type>Sort</Node-Type>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Sort-Key>
<Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item>
<Item>remote_scan.l_quantity</Item>
@ -181,15 +190,19 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Node-Type>Aggregate</Node-Type>
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Group-Key>
<Item>remote_scan.l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Distributed-Query>
<Job>
<Task-Count>2</Task-Count>
@ -205,13 +218,16 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Group-Key>
<Item>l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>lineitem_360000</Relation-Name>
<Alias>lineitem</Alias>
</Plan>
@ -250,6 +266,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Plan:
Node Type: "Sort"
Parallel Aware: false
Async Capable: false
Sort Key:
- "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))"
- "remote_scan.l_quantity"
@ -257,13 +274,17 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Node Type: "Aggregate"
Strategy: "Hashed"
Partial Mode: "Simple"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Group Key:
- "remote_scan.l_quantity"
Plans:
- Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false
Async Capable: false
Distributed Query:
Job:
Task Count: 2
@ -276,11 +297,14 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Hashed"
Partial Mode: "Simple"
Parallel Aware: false
Async Capable: false
Group Key:
- "l_quantity"
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Relation Name: "lineitem_360000"
Alias: "lineitem"

@ -1135,11 +1159,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Plain",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
"Plans": [
{
"Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
"Distributed Query": {
"Job": {
"Task Count": 6,
@ -1191,11 +1218,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Plain</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Distributed-Query>
<Job>
<Task-Count>6</Task-Count>
@ -1258,10 +1288,13 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Plain"
Partial Mode: "Simple"
Parallel Aware: false
Async Capable: false
Plans:
- Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false
Async Capable: false
Distributed Query:
Job:
Task Count: 6
@ -1684,6 +1717,7 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
"Plan": { +
"Node Type": "Result", +
"Parallel Aware": false,+
"Async Capable": false, +
"Actual Rows": 1, +
"Actual Loops": 1 +
}, +
@ -1707,6 +1741,7 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
<Plan> +
<Node-Type>Result</Node-Type> +
<Parallel-Aware>false</Parallel-Aware> +
<Async-Capable>false</Async-Capable> +
<Actual-Rows>1</Actual-Rows> +
<Actual-Loops>1</Actual-Loops> +
</Plan> +
@ -1728,6 +1763,7 @@ SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
- Plan: +
Node Type: "Result" +
Parallel Aware: false+
Async Capable: false +
Actual Rows: 1 +
Actual Loops: 1 +
Triggers:
@ -2115,6 +2151,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT
"Node Type": "Custom Scan",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
"Actual Rows": 0,
"Actual Loops": 1,
"Distributed Query": {
@ -2131,6 +2168,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT
"Node Type": "ModifyTable",
"Operation": "Insert",
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "explain_pk_570013",
"Alias": "citus_table_alias",
"Actual Rows": 0,
@ -2138,7 +2176,9 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INT
"Plans": [
{
"Node Type": "Result",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Actual Rows": 1,
"Actual Loops": 1
}
@ -2167,6 +2207,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F
"Node Type": "Custom Scan",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
"Actual Rows": 0,
"Actual Loops": 1,
"Distributed Query": {
@ -2184,6 +2225,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * F
"Plan": {
"Node Type": "Seq Scan",
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "explain_pk_570013",
"Alias": "explain_pk",
"Actual Rows": 0,
@ -2212,6 +2254,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO
<Node-Type>Custom Scan</Node-Type>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Actual-Rows>0</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
<Distributed-Query>
@ -2228,6 +2271,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO
<Node-Type>ModifyTable</Node-Type>
<Operation>Insert</Operation>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>explain_pk_570013</Relation-Name>
<Alias>citus_table_alias</Alias>
<Actual-Rows>0</Actual-Rows>
@ -2235,7 +2279,9 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO
<Plans>
<Plan>
<Node-Type>Result</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Actual-Rows>1</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
</Plan>
@ -2263,6 +2309,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FR
<Node-Type>Custom Scan</Node-Type>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Actual-Rows>0</Actual-Rows>
<Actual-Loops>1</Actual-Loops>
<Distributed-Query>
@ -2280,6 +2327,7 @@ EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FR
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>explain_pk_570013</Relation-Name>
<Alias>explain_pk</Alias>
<Actual-Rows>0</Actual-Rows>

@ -1,7 +1,7 @@
--
-- MULTI_METADATA_SYNC
--
-- this test has different output for PG13/14 compared to PG15
-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset

@ -1,7 +1,7 @@
--
-- MULTI_METADATA_SYNC
--
-- this test has different output for PG13/14 compared to PG15
-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset

@ -85,19 +85,24 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Plan": {
"Node Type": "Sort",
"Parallel Aware": false,
"Async Capable": false,
"Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Aggregate",
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Group Key": ["remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
"Distributed Query": {
"Job": {
"Task Count": 16,
@ -113,11 +118,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
"Group Key": ["l_quantity"],
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "lineitem_mx_1220052",
"Alias": "lineitem_mx"
}
@ -153,6 +161,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Plan>
<Node-Type>Sort</Node-Type>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Sort-Key>
<Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item>
<Item>remote_scan.l_quantity</Item>
@ -162,15 +171,19 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Node-Type>Aggregate</Node-Type>
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Group-Key>
<Item>remote_scan.l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Distributed-Query>
<Job>
<Task-Count>16</Task-Count>
@ -186,13 +199,16 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Group-Key>
<Item>l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>lineitem_mx_1220052</Relation-Name>
<Alias>lineitem_mx</Alias>
</Plan>
@ -224,6 +240,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Plan:
Node Type: "Sort"
Parallel Aware: false
Async Capable: false
Sort Key:
- "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))"
- "remote_scan.l_quantity"
@ -231,13 +248,17 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
- Node Type: "Aggregate"
Strategy: "Hashed"
Partial Mode: "Simple"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Group Key:
- "remote_scan.l_quantity"
Plans:
- Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false
Async Capable: false
Distributed Query:
Job:
Task Count: 16
@ -250,11 +271,14 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Hashed"
Partial Mode: "Simple"
Parallel Aware: false
Async Capable: false
Group Key:
- "l_quantity"
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Relation Name: "lineitem_mx_1220052"
Alias: "lineitem_mx"

@ -528,11 +552,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Plain",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
"Plans": [
{
"Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Adaptive",
"Parallel Aware": false,
"Async Capable": false,
"Distributed Query": {
"Job": {
"Task Count": 16,
@ -548,34 +575,45 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Strategy": "Plain",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Async Capable": false,
"Plans": [
{
"Node Type": "Hash Join",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Join Type": "Inner",
"Inner Unique": false,
"Hash Cond": "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)",
"Plans": [
{
"Node Type": "Hash Join",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Join Type": "Inner",
"Inner Unique": false,
"Hash Cond": "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)",
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "supplier_mx_1220087",
"Alias": "supplier_mx"
},
{
"Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false,
"Async Capable": false,
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "lineitem_mx_1220052",
"Alias": "lineitem_mx"
}
@ -585,28 +623,38 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
},
{
"Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false,
"Async Capable": false,
"Plans": [
{
"Node Type": "Hash Join",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Join Type": "Inner",
"Inner Unique": false,
"Hash Cond": "(customer_mx.c_custkey = orders_mx.o_custkey)",
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "customer_mx_1220084",
"Alias": "customer_mx"
},
{
"Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false,
"Async Capable": false,
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Async Capable": false,
"Relation Name": "orders_mx_1220068",
"Alias": "orders_mx"
}
@ -653,11 +701,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Plain</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Distributed-Query>
<Job>
<Task-Count>16</Task-Count>
@ -673,34 +724,45 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Strategy>Plain</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans>
<Plan>
<Node-Type>Hash Join</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Join-Type>Inner</Join-Type>
<Inner-Unique>false</Inner-Unique>
<Hash-Cond>(lineitem_mx.l_orderkey = orders_mx.o_orderkey)</Hash-Cond>
<Plans>
<Plan>
<Node-Type>Hash Join</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Join-Type>Inner</Join-Type>
<Inner-Unique>false</Inner-Unique>
<Hash-Cond>(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)</Hash-Cond>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>supplier_mx_1220087</Relation-Name>
<Alias>supplier_mx</Alias>
</Plan>
<Plan>
<Node-Type>Hash</Node-Type>
<Parent-Relationship>Inner</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>lineitem_mx_1220052</Relation-Name>
<Alias>lineitem_mx</Alias>
</Plan>
@ -710,28 +772,38 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
</Plan>
<Plan>
<Node-Type>Hash</Node-Type>
<Parent-Relationship>Inner</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans>
<Plan>
<Node-Type>Hash Join</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Join-Type>Inner</Join-Type>
<Inner-Unique>false</Inner-Unique>
<Hash-Cond>(customer_mx.c_custkey = orders_mx.o_custkey)</Hash-Cond>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>customer_mx_1220084</Relation-Name>
<Alias>customer_mx</Alias>
</Plan>
<Plan>
<Node-Type>Hash</Node-Type>
<Parent-Relationship>Inner</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Async-Capable>false</Async-Capable>
<Relation-Name>orders_mx_1220068</Relation-Name>
<Alias>orders_mx</Alias>
</Plan>
@ -775,10 +847,13 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Plain"
Partial Mode: "Simple"
Parallel Aware: false
Async Capable: false
Plans:
- Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Adaptive"
Parallel Aware: false
Async Capable: false
Distributed Query:
Job:
Task Count: 16
@ -791,48 +866,69 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Strategy: "Plain"
Partial Mode: "Simple"
Parallel Aware: false
Async Capable: false
Plans:
- Node Type: "Hash Join"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Join Type: "Inner"
Inner Unique: false
Hash Cond: "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)"
Plans:
- Node Type: "Hash Join"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Join Type: "Inner"
Inner Unique: false
Hash Cond: "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)"
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Relation Name: "supplier_mx_1220087"
Alias: "supplier_mx"
- Node Type: "Hash"
Parent Relationship: "Inner"
Parallel Aware: false
Async Capable: false
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Relation Name: "lineitem_mx_1220052"
Alias: "lineitem_mx"
- Node Type: "Hash"
Parent Relationship: "Inner"
Parallel Aware: false
Async Capable: false
Plans:
- Node Type: "Hash Join"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Join Type: "Inner"
Inner Unique: false
Hash Cond: "(customer_mx.c_custkey = orders_mx.o_custkey)"
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Relation Name: "customer_mx_1220084"
Alias: "customer_mx"
- Node Type: "Hash"
Parent Relationship: "Inner"
Parallel Aware: false
Async Capable: false
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Async Capable: false
Relation Name: "orders_mx_1220068"
Alias: "orders_mx"

@ -1,10 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
\else
\q
\endif
create schema pg14;
set search_path to pg14;
SET citus.shard_replication_factor TO 1;

@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
\else
\q

@ -37,7 +37,7 @@ CREATE PROCEDURE test_procedure_commit(tt_id int, tt_org_id int) LANGUAGE SQL AS
COMMIT;
$$;
CALL test_procedure_commit(2,5);
ERROR: COMMIT is not allowed in a SQL function
ERROR: COMMIT is not allowed in an SQL function
CONTEXT: SQL function "test_procedure_commit" during startup
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
@ -52,7 +52,7 @@ CREATE PROCEDURE test_procedure_rollback(tt_id int, tt_org_id int) LANGUAGE SQL
COMMIT;
$$;
CALL test_procedure_rollback(2,15);
ERROR: ROLLBACK is not allowed in a SQL function
ERROR: ROLLBACK is not allowed in an SQL function
CONTEXT: SQL function "test_procedure_rollback" during startup
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id

@ -2,12 +2,7 @@
-- stat_statements
--
-- tests citus_stat_statements functionality
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
SET compute_query_id = 'on';
\endif
-- check if pg_stat_statements is available
SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements';
name
@ -72,11 +67,7 @@ select query, calls from citus_stat_statements();
insert into test values($1) | 1
(1 row)

\if :server_version_ge_14
SET compute_query_id = 'off';
\else
set citus.stat_statements_track = 'none';
\endif
-- for pg >= 14, since compute_query_id is off, this insert
-- shouldn't be tracked
-- for pg < 14, we disable it explicitly so that we don't need
@ -88,11 +79,7 @@ select query, calls from citus_stat_statements();
insert into test values($1) | 1
(1 row)

\if :server_version_ge_14
SET compute_query_id = 'on';
\else
RESET citus.stat_statements_track;
\endif
SELECT citus_stat_statements_reset();
citus_stat_statements_reset
---------------------------------------------------------------------
@ -646,6 +633,4 @@ CONTEXT: PL/pgSQL function citus_stat_statements() line XX at RAISE
-- drop created tables
DROP TABLE stat_test_text, stat_test_bigint, stat_test_bigint_other, stat_test_reference;
DROP FUNCTION normalize_query_string(text);
\if :server_version_ge_14
SET compute_query_id = 'off';
\endif

@ -114,7 +114,7 @@ delete from test_ref;
ERROR: fake_tuple_delete not implemented
CONTEXT: while executing command on localhost:xxxxx
update test_ref set a=2;
ERROR: fake_tuple_update not implemented
ERROR: fake_fetch_row_version not implemented
CONTEXT: while executing command on localhost:xxxxx
RESET client_min_messages;
-- ddl events should include "USING fake_am"

@ -3,8 +3,6 @@
-- ===================================================================
-- test top level window functions that are pushdownable
-- ===================================================================
-- This test file has an alternative output because of use of
-- incremental sort in some explain outputs in PG13
--
-- a very simple window function with an aggregate and a window function
-- distribution column is on the partition by clause

File diff suppressed because it is too large

@ -1,7 +1,6 @@
// Three alternative test outputs:
// Two alternative test outputs:
// isolation_master_update_node.out for PG15
// isolation_master_update_node_0.out for PG14
// isolation_master_update_node_1.out for PG13

setup
{

@ -63,9 +63,6 @@ SET search_path TO cpu_priority;
-- in their CREATE SUBSCRIPTION commands.
SET citus.log_remote_commands TO ON;
SET citus.grep_remote_commands = '%CREATE SUBSCRIPTION%';
-- We disable binary protocol, so we have consistent output between PG13 and
-- PG14, beacuse PG13 doesn't support binary logical replication.
SET citus.enable_binary_protocol = false;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical');
SET citus.cpu_priority_for_logical_replication_senders = 15;
SELECT master_move_shard_placement(11568900, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical');

@ -1,7 +1,3 @@
-- This test file has an alternative output because of error messages vary for PG13
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int <= 13 AS server_version_le_13;

CREATE SCHEMA generated_identities;
SET search_path TO generated_identities;
SET client_min_messages to ERROR;

@ -1,7 +1,7 @@
--
-- GRANT_ON_SCHEMA_PROPAGATION
--
-- this test has different output for PG13/14 compared to PG15
-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset
@ -189,6 +189,9 @@ DROP SCHEMA dist_schema CASCADE;
SET citus.shard_replication_factor TO 1;
SELECT master_remove_node('localhost', :worker_2_port);

-- to avoid different output in PG15
GRANT CREATE ON SCHEMA public TO public;

-- distribute the public schema (it has to be distributed by now but just in case)
CREATE TABLE public_schema_table (id INT);
SELECT create_distributed_table('public_schema_table', 'id');

@ -1,7 +1,7 @@
--
-- MULTI_METADATA_SYNC
--
-- this test has different output for PG13/14 compared to PG15
-- this test has different output for PG14 compared to PG15
-- In PG15, public schema is owned by pg_database_owner role
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
SHOW server_version \gset

@ -1,11 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
\else
\q
\endif

create schema pg14;
set search_path to pg14;
SET citus.shard_replication_factor TO 1;

@ -3,12 +3,7 @@
--
-- tests citus_stat_statements functionality

SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 14 AS server_version_ge_14
\gset
\if :server_version_ge_14
SET compute_query_id = 'on';
\endif

-- check if pg_stat_statements is available
SELECT name FROM pg_available_extensions WHERE name = 'pg_stat_statements';
@ -50,11 +45,7 @@ SELECT create_distributed_table('test','a');
insert into test values(1);

select query, calls from citus_stat_statements();
\if :server_version_ge_14
SET compute_query_id = 'off';
\else
set citus.stat_statements_track = 'none';
\endif

-- for pg >= 14, since compute_query_id is off, this insert
-- shouldn't be tracked
@ -64,11 +55,7 @@ insert into test values(1);
select query, calls from citus_stat_statements();


\if :server_version_ge_14
SET compute_query_id = 'on';
\else
RESET citus.stat_statements_track;
\endif


SELECT citus_stat_statements_reset();
@ -290,6 +277,4 @@ DROP TABLE stat_test_text, stat_test_bigint, stat_test_bigint_other, stat_test_r
DROP FUNCTION normalize_query_string(text);


\if :server_version_ge_14
SET compute_query_id = 'off';
\endif

@ -3,8 +3,6 @@
-- ===================================================================
-- test top level window functions that are pushdownable
-- ===================================================================
-- This test file has an alternative output because of use of
-- incremental sort in some explain outputs in PG13
--

-- a very simple window function with an aggregate and a window function