Drop postgres 12 support (#6040)

* Remove if conditions with PG_VERSION_NUM < 13

* Remove server_above_twelve(&eleven) checks from tests

* Fix tests

* Remove pg12 and pg11 alternative test output files

* Remove pg12 specific normalization rules

* Remove some more if conditions in the code

* Change RemoteCollationIdExpression and some pg12/pg13 comments

* Remove some more normalization rules
Naisila Puka 2022-07-20 17:49:36 +03:00 committed by GitHub
parent c085ac026a
commit 7d6410c838
118 changed files with 126 additions and 9702 deletions
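
Nearly every hunk below follows the same mechanical pattern: a compile-time version gate whose pre-13 branch is now dead code, collapsed down to its PG13+ branch. A minimal sketch of that pattern, using the getOwnedSequences() call sites that recur throughout this commit:

    /* before: both branches existed so the extension could compile against PG12 */
    #if PG_VERSION_NUM >= PG_VERSION_13
    List *ownedSequences = getOwnedSequences(relationId);
    #else
    List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber);
    #endif

    /* after: PG13 is the minimum supported server version, so one call remains */
    List *ownedSequences = getOwnedSequences(relationId);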


@@ -121,10 +121,8 @@ static void ColumnarScan_ExplainCustomScan(CustomScanState *node, List *ancestor
 static const char * ColumnarPushdownClausesStr(List *context, List *clauses);
 static const char * ColumnarProjectedColumnsStr(List *context,
                                                 List *projectedColumns);
-#if PG_VERSION_NUM >= 130000
 static List * set_deparse_context_planstate(List *dpcontext, Node *node,
                                             List *ancestors);
-#endif
 
 /* other helpers */
 static List * ColumnarVarNeeded(ColumnarScanState *columnarScanState);
@@ -1986,8 +1984,6 @@ ColumnarVarNeeded(ColumnarScanState *columnarScanState)
 }
 
-#if PG_VERSION_NUM >= 130000
-
 /*
  * set_deparse_context_planstate is a compatibility wrapper for versions 13+.
  */
@@ -1997,6 +1993,3 @@ set_deparse_context_planstate(List *dpcontext, Node *node, List *ancestors)
     PlanState *ps = (PlanState *) node;
     return set_deparse_context_plan(dpcontext, ps->plan, ancestors);
 }
-
-#endif


@@ -12,11 +12,7 @@
 #include "access/rewriteheap.h"
 #include "access/tableam.h"
 #include "access/tsmapi.h"
-#if PG_VERSION_NUM >= 130000
 #include "access/detoast.h"
-#else
-#include "access/tuptoaster.h"
-#endif
 #include "access/xact.h"
 #include "catalog/catalog.h"
 #include "catalog/index.h"
@@ -1676,15 +1672,8 @@ ColumnarReadRowsIntoIndex(TableScanDesc scan, Relation indexRelation,
         /* currently, columnar tables can't have dead tuples */
         bool tupleIsAlive = true;
 
-#if PG_VERSION_NUM >= PG_VERSION_13
         indexCallback(indexRelation, &itemPointerData, indexValues, indexNulls,
                       tupleIsAlive, indexCallbackState);
-#else
-        HeapTuple scanTuple = ExecCopySlotHeapTuple(slot);
-        scanTuple->t_self = itemPointerData;
-        indexCallback(indexRelation, scanTuple, indexValues, indexNulls,
-                      tupleIsAlive, indexCallbackState);
-#endif
 
         reltuples++;
     }
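
Background for the branch deleted above: PG13 changed the index build callback to hand over the tuple's TID directly, so the pre-13 workaround of materializing a heap tuple just to set t_self is unnecessary. The PG13 callback type, as declared in access/tableam.h (quoted from memory; verify against the headers you build with):

    typedef void (*IndexBuildCallback) (Relation index,
                                        ItemPointer tid,
                                        Datum *values,
                                        bool *isnull,
                                        bool tupleIsAlive,
                                        void *state);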


@@ -14,12 +14,8 @@
 #include "access/multixact.h"
 #include "access/rewriteheap.h"
 #include "access/tsmapi.h"
-#if PG_VERSION_NUM >= 130000
 #include "access/heaptoast.h"
 #include "common/hashfn.h"
-#else
-#include "access/tuptoaster.h"
-#endif
 #include "access/xact.h"
 #include "catalog/catalog.h"
 #include "catalog/index.h"


@@ -1567,11 +1567,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
         ExecuteQueryViaSPI(query->data, SPI_OK_INSERT);
     }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
     List *ownedSequences = getOwnedSequences(sourceId);
-#else
-    List *ownedSequences = getOwnedSequences(sourceId, InvalidAttrNumber);
-#endif
     Oid sequenceOid = InvalidOid;
     foreach_oid(sequenceOid, ownedSequences)
     {


@@ -36,7 +36,6 @@ PreprocessClusterStmt(Node *node, const char *clusterCommand,
 {
     ClusterStmt *clusterStmt = castNode(ClusterStmt, node);
     bool missingOK = false;
-    DDLJob *ddlJob = NULL;
 
     if (clusterStmt->relation == NULL)
     {
@@ -67,18 +66,14 @@ PreprocessClusterStmt(Node *node, const char *clusterCommand,
         return NIL;
     }
 
-#if PG_VERSION_NUM >= 120000
     if (IsClusterStmtVerbose_compat(clusterStmt))
-#else
-    if (clusterStmt->verbose)
-#endif
     {
         ereport(ERROR, (errmsg("cannot run CLUSTER command"),
                         errdetail("VERBOSE option is currently unsupported "
                                   "for distributed tables.")));
     }
 
-    ddlJob = palloc0(sizeof(DDLJob));
+    DDLJob *ddlJob = palloc0(sizeof(DDLJob));
     ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId);
     ddlJob->metadataSyncCommand = clusterCommand;
     ddlJob->taskList = DDLTaskList(relationId, clusterCommand);


@@ -874,7 +874,6 @@ static DistributeObjectOps Schema_Rename = {
     .address = AlterSchemaRenameStmtObjectAddress,
     .markDistributed = false,
 };
-#if PG_VERSION_NUM >= PG_VERSION_13
 static DistributeObjectOps Statistics_Alter = {
     .deparse = DeparseAlterStatisticsStmt,
     .qualify = QualifyAlterStatisticsStmt,
@@ -883,7 +882,6 @@ static DistributeObjectOps Statistics_Alter = {
     .address = NULL,
     .markDistributed = false,
 };
-#endif
 static DistributeObjectOps Statistics_AlterObjectSchema = {
     .deparse = DeparseAlterStatisticsSchemaStmt,
     .qualify = QualifyAlterStatisticsSchemaStmt,
@@ -1304,13 +1302,11 @@ GetDistributeObjectOps(Node *node)
             return &Sequence_Alter;
         }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
         case T_AlterStatsStmt:
         {
             return &Statistics_Alter;
         }
 
-#endif
         case T_AlterTableStmt:
         {
             AlterTableStmt *stmt = castNode(AlterTableStmt, node);


@@ -920,11 +920,8 @@ GetFunctionAlterOwnerCommand(const RegProcedure funcOid)
 /*
  * GetAggregateDDLCommand returns a string for creating an aggregate.
- * CREATE OR REPLACE AGGREGATE was only introduced in pg12,
- * so a second parameter useCreateOrReplace signals whether to
- * to create a plain CREATE AGGREGATE or not. In pg11 we return a string
- * which is a call to worker_create_or_replace_object in lieu of
- * CREATE OR REPLACE AGGREGATE.
+ * A second parameter useCreateOrReplace signals whether to
+ * to create a plain CREATE AGGREGATE or not.
  */
 static char *
 GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)


@@ -106,9 +106,7 @@
 #include "nodes/nodeFuncs.h"
 #include "parser/parse_func.h"
 #include "parser/parse_type.h"
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "tcop/cmdtag.h"
-#endif
 #include "tsearch/ts_locale.h"
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
@@ -764,12 +762,7 @@ FindJsonbInputColumns(TupleDesc tupleDescriptor, List *inputColumnNameList)
 static void
 CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 processedRowCount)
 {
-#if PG_VERSION_NUM >= PG_VERSION_13
     SetQueryCompletion(completionTag, CMDTAG_COPY, processedRowCount);
-#else
-    SafeSnprintf(completionTag, COMPLETION_TAG_BUFSIZE,
-                 "COPY " UINT64_FORMAT, processedRowCount);
-#endif
 }
@@ -781,9 +774,6 @@ static List *
 RemoveOptionFromList(List *optionList, char *optionName)
 {
     ListCell *optionCell = NULL;
-#if PG_VERSION_NUM < PG_VERSION_13
-    ListCell *previousCell = NULL;
-#endif
     foreach(optionCell, optionList)
     {
         DefElem *option = (DefElem *) lfirst(optionCell);
@@ -792,9 +782,6 @@ RemoveOptionFromList(List *optionList, char *optionName)
         {
             return list_delete_cell_compat(optionList, optionCell, previousCell);
         }
-#if PG_VERSION_NUM < PG_VERSION_13
-        previousCell = optionCell;
-#endif
     }
 
     return optionList;
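
The previousCell bookkeeping deleted here (and in the similar loops later in this diff) existed because list_delete_cell() required the previous cell before PG13; postgres commit 1cff1b95ab6d rebuilt List as a contiguous array and dropped that parameter. Citus hides the difference behind list_delete_cell_compat, which on PG13+ plausibly reduces to a one-line macro along these lines (a sketch, not the verbatim Citus definition):

    /* on PG13+ the previous-cell argument is simply ignored */
    #define list_delete_cell_compat(l, c, p) list_delete_cell(l, c)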


@@ -290,22 +290,10 @@ PostprocessCreatePolicyStmt(Node *node, const char *queryString)
 static void
 AddRangeTableEntryToQueryCompat(ParseState *parseState, Relation relation)
 {
-#if PG_VERSION_NUM >= PG_VERSION_13
-    ParseNamespaceItem *rte = NULL;
-#else
-    RangeTblEntry *rte = NULL;
-#endif
-
-    rte = addRangeTableEntryForRelation(parseState, relation,
-#if PG_VERSION_NUM >= PG_VERSION_12
-                                        AccessShareLock,
-#endif
-                                        NULL, false, false);
-#if PG_VERSION_NUM >= PG_VERSION_13
+    ParseNamespaceItem *rte = addRangeTableEntryForRelation(parseState, relation,
+                                                            AccessShareLock, NULL,
+                                                            false, false);
     addNSItemToQuery(parseState, rte, false, true, true);
-#else
-    addRTEtoQuery(parseState, rte, false, true, true);
-#endif
 }


@@ -197,11 +197,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,
         if (list_length(columnOwnedSequences) != 0)
         {
             /*
-             * A column might only own one sequence. We intentionally use
-             * GetSequencesOwnedByColumn macro and pick initial oid from the
-             * list instead of using getOwnedSequence. This is both because
-             * getOwnedSequence is removed in pg13 and is also because it
-             * errors out if column does not have any sequences.
+             * A column might only own one sequence.
              */
             Assert(list_length(columnOwnedSequences) == 1);
             ownedSequenceId = linitial_oid(columnOwnedSequences);


@@ -55,9 +55,7 @@ static char * GenerateAlterIndexColumnSetStatsCommand(char *indexNameWithSchema,
                                                       int32 attstattarget);
 static Oid GetRelIdByStatsOid(Oid statsOid);
 static char * CreateAlterCommandIfOwnerNotDefault(Oid statsOid);
-#if PG_VERSION_NUM >= PG_VERSION_13
 static char * CreateAlterCommandIfTargetNotDefault(Oid statsOid);
-#endif
 
 /*
  * PreprocessCreateStatisticsStmt is called during the planning phase for
@@ -343,8 +341,6 @@ AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk)
 }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
-
 /*
  * PreprocessAlterStatisticsStmt is called during the planning phase for
  * ALTER STATISTICS .. SET STATISTICS.
@@ -393,8 +389,6 @@ PreprocessAlterStatisticsStmt(Node *node, const char *queryString,
 }
 
-#endif
-
 /*
  * PreprocessAlterStatisticsOwnerStmt is called during the planning phase for
  * ALTER STATISTICS .. OWNER TO.
@@ -507,7 +501,6 @@ GetExplicitStatisticsCommandList(Oid relationId)
         explicitStatisticsCommandList =
             lappend(explicitStatisticsCommandList,
                     makeTableDDLCommandString(createStatisticsCommand));
-#if PG_VERSION_NUM >= PG_VERSION_13
 
         /* we need to alter stats' target if it's getting distributed after creation */
         char *alterStatisticsTargetCommand =
@@ -519,7 +512,6 @@ GetExplicitStatisticsCommandList(Oid relationId)
                 lappend(explicitStatisticsCommandList,
                         makeTableDDLCommandString(alterStatisticsTargetCommand));
         }
-#endif
 
         /* we need to alter stats' owner if it's getting distributed after creation */
         char *alterStatisticsOwnerCommand =
@@ -709,8 +701,6 @@ CreateAlterCommandIfOwnerNotDefault(Oid statsOid)
 }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
-
 /*
  * CreateAlterCommandIfTargetNotDefault returns an ALTER STATISTICS .. SET STATISTICS
  * command if the stats object with given id has a target different than the default one.
@@ -745,6 +735,3 @@ CreateAlterCommandIfTargetNotDefault(Oid statsOid)
     return DeparseAlterStatisticsStmt((Node *) alterStatsStmt);
 }
-
-#endif


@@ -35,9 +35,6 @@ Node *
 ProcessCreateSubscriptionStmt(CreateSubscriptionStmt *createSubStmt)
 {
     ListCell *currCell = NULL;
-#if PG_VERSION_NUM < PG_VERSION_13
-    ListCell *prevCell = NULL;
-#endif
     bool useAuthinfo = false;
 
     foreach(currCell, createSubStmt->options)
@@ -54,9 +51,6 @@ ProcessCreateSubscriptionStmt(CreateSubscriptionStmt *createSubStmt)
             break;
         }
-#if PG_VERSION_NUM < PG_VERSION_13
-        prevCell = currCell;
-#endif
     }
 
     if (useAuthinfo)


@@ -757,9 +757,9 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
     {
         /*
          * We don't process subcommands generated by postgres.
-         * This is mainly because postgres started to issue ALTER TABLE commands
-         * for some set of objects that are defined via CREATE TABLE commands as
-         * of pg13. However, citus already has a separate logic for CREATE TABLE
+         * This is mainly because postgres issues ALTER TABLE commands
+         * for some set of objects that are defined via CREATE TABLE commands.
+         * However, citus already has a separate logic for CREATE TABLE
          * commands.
          *
         * To support foreign keys from/to postgres local tables to/from reference


@@ -41,10 +41,7 @@ typedef struct CitusVacuumParams
     int options;
     VacOptValue truncate;
    VacOptValue index_cleanup;
-
-#if PG_VERSION_NUM >= PG_VERSION_13
     int nworkers;
-#endif
 } CitusVacuumParams;
 
 /* Local functions forward declarations for processing distributed table commands */
@@ -323,10 +320,8 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
     /* if no flags remain, exit early */
     if (vacuumFlags == 0 &&
         vacuumParams.truncate == VACOPTVALUE_UNSPECIFIED &&
-        vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED
-#if PG_VERSION_NUM >= PG_VERSION_13
-        && vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET
-#endif
+        vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED &&
+        vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET
         )
     {
         return vacuumPrefix->data;
@@ -409,12 +404,10 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
         }
     }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
     if (vacuumParams.nworkers != VACUUM_PARALLEL_NOTSET)
     {
         appendStringInfo(vacuumPrefix, "PARALLEL %d,", vacuumParams.nworkers);
     }
-#endif
 
     vacuumPrefix->data[vacuumPrefix->len - 1] = ')';
@@ -515,9 +508,7 @@ VacuumStmtParams(VacuumStmt *vacstmt)
     /* Set default value */
     params.index_cleanup = VACOPTVALUE_UNSPECIFIED;
     params.truncate = VACOPTVALUE_UNSPECIFIED;
-#if PG_VERSION_NUM >= PG_VERSION_13
     params.nworkers = VACUUM_PARALLEL_NOTSET;
-#endif
 
     /* Parse options list */
     DefElem *opt = NULL;
@@ -596,7 +587,6 @@ VacuumStmtParams(VacuumStmt *vacstmt)
             params.truncate = defGetBoolean(opt) ? VACOPTVALUE_ENABLED :
                               VACOPTVALUE_DISABLED;
         }
-#if PG_VERSION_NUM >= PG_VERSION_13
         else if (strcmp(opt->defname, "parallel") == 0)
         {
             if (opt->arg == NULL)
@@ -620,7 +610,6 @@ VacuumStmtParams(VacuumStmt *vacstmt)
                 params.nworkers = nworkers;
             }
         }
-#endif
         else
         {
             ereport(ERROR,


@@ -48,11 +48,7 @@
 #include "distributed/tuplestore.h"
 #include "distributed/worker_manager.h"
 #include "utils/builtins.h"
-#if PG_VERSION_NUM < PG_VERSION_13
-#include "utils/hashutils.h"
-#else
 #include "common/hashfn.h"
-#endif
 
 #define RESERVED_CONNECTION_COLUMNS 4


@@ -26,9 +26,7 @@
 #include "distributed/placement_connection.h"
 #include "distributed/relation_access_tracking.h"
 #include "utils/hsearch.h"
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "common/hashfn.h"
-#endif
 #include "utils/memutils.h"


@@ -36,12 +36,7 @@
 #include "distributed/time_constants.h"
 #include "distributed/tuplestore.h"
 #include "utils/builtins.h"
-#if PG_VERSION_NUM < PG_VERSION_13
-#include "utils/hsearch.h"
-#include "utils/hashutils.h"
-#else
 #include "common/hashfn.h"
-#endif
 #include "storage/ipc.h"


@@ -526,7 +526,7 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
     }
 
     /*
-     * Add table access methods for pg12 and higher when the table is configured with an
+     * Add table access methods when the table is configured with an
      * access method
      */
     if (accessMethod)
@@ -999,7 +999,6 @@ deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparse
             appendStringInfo(buffer, "%s ",
                              NameListToQuotedString(indexElement->opclass));
         }
-#if PG_VERSION_NUM >= PG_VERSION_13
 
         /* Commit on postgres: 911e70207703799605f5a0e8aad9f06cff067c63*/
         if (indexElement->opclassopts != NIL)
@@ -1008,7 +1007,6 @@ deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparse
             AppendStorageParametersToString(buffer, indexElement->opclassopts);
             appendStringInfoString(buffer, ") ");
         }
-#endif
 
         if (indexElement->ordering != SORTBY_DEFAULT)
         {


@@ -27,9 +27,7 @@ static void AppendCreateStatisticsStmt(StringInfo buf, CreateStatsStmt *stmt);
 static void AppendDropStatisticsStmt(StringInfo buf, List *nameList, bool ifExists);
 static void AppendAlterStatisticsRenameStmt(StringInfo buf, RenameStmt *stmt);
 static void AppendAlterStatisticsSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt);
-#if PG_VERSION_NUM >= PG_VERSION_13
 static void AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt);
-#endif
 static void AppendAlterStatisticsOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt);
 static void AppendStatisticsName(StringInfo buf, CreateStatsStmt *stmt);
 static void AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt);
@@ -90,7 +88,6 @@ DeparseAlterStatisticsSchemaStmt(Node *node)
 }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
 char *
 DeparseAlterStatisticsStmt(Node *node)
 {
@@ -105,7 +102,6 @@ DeparseAlterStatisticsStmt(Node *node)
 }
 
-#endif
 char *
 DeparseAlterStatisticsOwnerStmt(Node *node)
 {
@@ -177,7 +173,6 @@ AppendAlterStatisticsSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt)
 }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
 static void
 AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt)
 {
@@ -186,7 +181,6 @@ AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt)
 }
 
-#endif
 static void
 AppendAlterStatisticsOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt)
 {


@@ -152,8 +152,6 @@ QualifyAlterStatisticsSchemaStmt(Node *node)
 }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
-
 /*
  * QualifyAlterStatisticsStmt qualifies AlterStatsStmt's with schema name for
  * ALTER STATISTICS .. SET STATISTICS statements.
@@ -180,8 +178,6 @@ QualifyAlterStatisticsStmt(Node *node)
 }
 
-#endif
-
 /*
  * QualifyAlterStatisticsOwnerStmt qualifies AlterOwnerStmt's with schema
 * name for ALTER STATISTICS .. OWNER TO statements.

File diff suppressed because it is too large


@@ -1790,17 +1790,9 @@ AcquireExecutorShardLocksForExecution(DistributedExecution *execution)
             /* Acquire additional locks for SELECT .. FOR UPDATE on reference tables */
             AcquireExecutorShardLocksForRelationRowLockList(task->relationRowLockList);
 
-            /*
-             * Due to PG commit 5ee190f8ec37c1bbfb3061e18304e155d600bc8e we copy the
-             * second parameter in pre-13.
-             */
             relationRowLockList =
                 list_concat(relationRowLockList,
-#if (PG_VERSION_NUM >= PG_VERSION_12) && (PG_VERSION_NUM < PG_VERSION_13)
-                            list_copy(task->relationRowLockList));
-#else
                             task->relationRowLockList);
-#endif
 
             /*
              * If the task has a subselect, then we may need to lock the shards from which
@@ -1814,19 +1806,9 @@ AcquireExecutorShardLocksForExecution(DistributedExecution *execution)
              * and therefore prevents other modifications from running
              * concurrently.
              */
-
-            /*
-             * Due to PG commit 5ee190f8ec37c1bbfb3061e18304e155d600bc8e we copy the
-             * second parameter in pre-13.
-             */
             requiresConsistentSnapshotRelationShardList =
                 list_concat(requiresConsistentSnapshotRelationShardList,
-#if (PG_VERSION_NUM >= PG_VERSION_12) && (PG_VERSION_NUM < PG_VERSION_13)
-                            list_copy(task->relationShardList));
-#else
                             task->relationShardList);
-#endif
         }
     }


@@ -34,9 +34,7 @@
 #include "catalog/pg_rewrite_d.h"
 #include "catalog/pg_shdepend.h"
 #include "catalog/pg_type.h"
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "common/hashfn.h"
-#endif
 #include "distributed/commands.h"
 #include "distributed/commands/utility_hook.h"
 #include "distributed/listutils.h"


@@ -75,9 +75,7 @@
 #include "utils/elog.h"
 #include "utils/hsearch.h"
 #include "utils/jsonb.h"
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "common/hashfn.h"
-#endif
 #include "utils/inval.h"
 #include "utils/fmgroids.h"
 #include "utils/lsyscache.h"


@@ -3804,9 +3804,8 @@ RemoteTypeIdExpression(Oid typeId)
 /*
  * RemoteCollationIdExpression returns an expression in text form that can
- * be used to obtain the OID of a type on a different node when included
- * in a query string. Currently this is a sublink because regcollation type
- * is not available in PG12.
+ * be used to obtain the OID of a collation on a different node when included
+ * in a query string.
  */
 static char *
 RemoteCollationIdExpression(Oid colocationId)
@@ -3825,16 +3824,15 @@ RemoteCollationIdExpression(Oid colocationId)
             (Form_pg_collation) GETSTRUCT(collationTuple);
         char *collationName = NameStr(collationform->collname);
         char *collationSchemaName = get_namespace_name(collationform->collnamespace);
+        char *qualifiedCollationName = quote_qualified_identifier(collationSchemaName,
+                                                                  collationName);
 
-        StringInfo colocationIdQuery = makeStringInfo();
-        appendStringInfo(colocationIdQuery,
-                         "(select oid from pg_collation"
-                         " where collname = %s"
-                         " and collnamespace = %s::regnamespace)",
-                         quote_literal_cstr(collationName),
-                         quote_literal_cstr(collationSchemaName));
+        StringInfo regcollationExpression = makeStringInfo();
+        appendStringInfo(regcollationExpression,
+                         "%s::regcollation",
+                         quote_literal_cstr(qualifiedCollationName));
 
-        expression = colocationIdQuery->data;
+        expression = regcollationExpression->data;
     }
 
     ReleaseSysCache(collationTuple);
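
The rewrite above leans on regcollation, an alias type new in PG13, so the collation OID can be resolved on the remote node with a plain cast rather than a scalar subquery. For a hypothetical collation "german" in schema "public", the generated expression text changes roughly like this (illustrative values, not output captured from this code):

    old: (select oid from pg_collation where collname = 'german' and collnamespace = 'public'::regnamespace)
    new: '"public"."german"'::regcollation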


@@ -66,9 +66,6 @@
 #include "utils/lsyscache.h"
 #include "utils/rel.h"
 #include "utils/syscache.h"
-#if PG_VERSION_NUM < 120000
-#include "utils/tqual.h"
-#endif
 
 #define DISK_SPACE_FIELDS 2


@@ -60,10 +60,7 @@
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
 #include "utils/syscache.h"
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "common/hashfn.h"
-#endif
 
 /* RebalanceOptions are the options used to control the rebalance algorithm */


@@ -131,9 +131,7 @@ static void WarnIfListHasForeignDistributedTable(List *rangeTableList);
 /* Distributed planner hook */
 PlannedStmt *
 distributed_planner(Query *parse,
-#if PG_VERSION_NUM >= PG_VERSION_13
                     const char *query_string,
-#endif
                     int cursorOptions,
                     ParamListInfo boundParams)
 {
@@ -1839,7 +1837,7 @@ TranslatedVars(PlannerInfo *root, int relationIndex)
         FindTargetAppendRelInfo(root, relationIndex);
     if (targetAppendRelInfo != NULL)
     {
-        /* postgres deletes translated_vars after pg13, hence we deep copy them here */
+        /* postgres deletes translated_vars, hence we deep copy them here */
         Node *targetNode = NULL;
         foreach_ptr(targetNode, targetAppendRelInfo->translated_vars)
         {


@@ -170,7 +170,7 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
     }
 
     /*
-     * In pg12's planning phase empty FROMs are represented with an RTE_RESULT.
+     * In the planning phase empty FROMs are represented with an RTE_RESULT.
      * When we arrive here, standard_planner has already been called which calls
     * replace_empty_jointree() which replaces empty fromlist with a list of
     * single RTE_RESULT RangleTableRef node.


@@ -21,9 +21,7 @@
 #include "distributed/query_utils.h"
 #include "distributed/worker_manager.h"
 #include "utils/builtins.h"
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "common/hashfn.h"
-#endif
 
 /* controlled via GUC, used mostly for testing */
 bool LogIntermediateResults = false;
@@ -373,9 +371,6 @@ RemoveLocalNodeFromWorkerList(List *workerNodeList)
     int32 localGroupId = GetLocalGroupId();
 
     ListCell *workerNodeCell = NULL;
-#if PG_VERSION_NUM < PG_VERSION_13
-    ListCell *prev = NULL;
-#endif
     foreach(workerNodeCell, workerNodeList)
     {
         WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
@@ -383,9 +378,6 @@ RemoveLocalNodeFromWorkerList(List *workerNodeList)
         {
             return list_delete_cell_compat(workerNodeList, workerNodeCell, prev);
         }
-#if PG_VERSION_NUM < PG_VERSION_13
-        prev = workerNodeCell;
-#endif
     }
 
     return workerNodeList;


@@ -297,8 +297,6 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
          */
         char *queryString = pstrdup("");
         instr_time planduration;
-
-#if PG_VERSION_NUM >= PG_VERSION_13
         BufferUsage bufusage_start,
                     bufusage;
@@ -306,7 +304,7 @@
         {
             bufusage_start = pgBufferUsage;
         }
-#endif
+
         if (es->format == EXPLAIN_FORMAT_TEXT)
         {
             char *resultId = GenerateResultId(planId, subPlan->subPlanId);
@@ -350,15 +348,12 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
         INSTR_TIME_SET_ZERO(planduration);
 
-#if PG_VERSION_NUM >= PG_VERSION_13
-
         /* calc differences of buffer counters. */
         if (es->buffers)
         {
             memset(&bufusage, 0, sizeof(BufferUsage));
             BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
         }
-#endif
 
         ExplainOpenGroup("PlannedStmt", "PlannedStmt", false, es);
@@ -923,18 +918,13 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es)
     appendStringInfo(explainQuery,
                      "EXPLAIN (ANALYZE %s, VERBOSE %s, "
-                     "COSTS %s, BUFFERS %s, "
-#if PG_VERSION_NUM >= PG_VERSION_13
-                     "WAL %s, "
-#endif
+                     "COSTS %s, BUFFERS %s, WAL %s, "
                      "TIMING %s, SUMMARY %s, FORMAT %s) %s",
                      es->analyze ? "TRUE" : "FALSE",
                      es->verbose ? "TRUE" : "FALSE",
                      es->costs ? "TRUE" : "FALSE",
                      es->buffers ? "TRUE" : "FALSE",
-#if PG_VERSION_NUM >= PG_VERSION_13
                      es->wal ? "TRUE" : "FALSE",
-#endif
                      es->timing ? "TRUE" : "FALSE",
                      es->summary ? "TRUE" : "FALSE",
                      formatStr,
@@ -1028,9 +1018,7 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
     /* use the same defaults as NewExplainState() for following options */
     es->buffers = ExtractFieldBoolean(explainOptions, "buffers", es->buffers);
-#if PG_VERSION_NUM >= PG_VERSION_13
     es->wal = ExtractFieldBoolean(explainOptions, "wal", es->wal);
-#endif
     es->costs = ExtractFieldBoolean(explainOptions, "costs", es->costs);
     es->summary = ExtractFieldBoolean(explainOptions, "summary", es->summary);
     es->verbose = ExtractFieldBoolean(explainOptions, "verbose", es->verbose);
@@ -1178,9 +1166,7 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into,
     /* save the flags of current EXPLAIN command */
     CurrentDistributedQueryExplainOptions.costs = es->costs;
     CurrentDistributedQueryExplainOptions.buffers = es->buffers;
-#if PG_VERSION_NUM >= PG_VERSION_13
     CurrentDistributedQueryExplainOptions.wal = es->wal;
-#endif
     CurrentDistributedQueryExplainOptions.verbose = es->verbose;
     CurrentDistributedQueryExplainOptions.summary = es->summary;
     CurrentDistributedQueryExplainOptions.timing = es->timing;
@@ -1189,7 +1175,6 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into,
     /* rest is copied from ExplainOneQuery() */
     instr_time planstart,
                planduration;
-#if PG_VERSION_NUM >= PG_VERSION_13
     BufferUsage bufusage_start,
                 bufusage;
@@ -1197,7 +1182,6 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into,
     {
         bufusage_start = pgBufferUsage;
     }
-#endif
 
     INSTR_TIME_SET_CURRENT(planstart);
@@ -1205,7 +1189,6 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into,
     PlannedStmt *plan = pg_plan_query_compat(query, NULL, cursorOptions, params);
     INSTR_TIME_SET_CURRENT(planduration);
     INSTR_TIME_SUBTRACT(planduration, planstart);
-#if PG_VERSION_NUM >= PG_VERSION_13
 
     /* calc differences of buffer counters. */
     if (es->buffers)
@@ -1213,7 +1196,6 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into,
         memset(&bufusage, 0, sizeof(BufferUsage));
         BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
     }
-#endif
 
     /* run it (if needed) and produce output */
     ExplainOnePlanCompat(plan, into, es, queryString, params, queryEnv,
@@ -1467,17 +1449,12 @@ WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc,
     StringInfo explainOptions = makeStringInfo();
     appendStringInfo(explainOptions,
-                     "{\"verbose\": %s, \"costs\": %s, \"buffers\": %s, "
-#if PG_VERSION_NUM >= PG_VERSION_13
-                     "\"wal\": %s, "
-#endif
+                     "{\"verbose\": %s, \"costs\": %s, \"buffers\": %s, \"wal\": %s, "
                      "\"timing\": %s, \"summary\": %s, \"format\": \"%s\"}",
                      CurrentDistributedQueryExplainOptions.verbose ? "true" : "false",
                      CurrentDistributedQueryExplainOptions.costs ? "true" : "false",
                      CurrentDistributedQueryExplainOptions.buffers ? "true" : "false",
-#if PG_VERSION_NUM >= PG_VERSION_13
                      CurrentDistributedQueryExplainOptions.wal ? "true" : "false",
-#endif
                      CurrentDistributedQueryExplainOptions.timing ? "true" : "false",
                      CurrentDistributedQueryExplainOptions.summary ? "true" : "false",
                      ExplainFormatStr(CurrentDistributedQueryExplainOptions.format));
@@ -1632,13 +1609,11 @@ ExplainOneQuery(Query *query, int cursorOptions,
     {
         instr_time planstart,
                    planduration;
-#if PG_VERSION_NUM >= PG_VERSION_13
         BufferUsage bufusage_start,
                     bufusage;
 
         if (es->buffers)
             bufusage_start = pgBufferUsage;
-#endif
         INSTR_TIME_SET_CURRENT(planstart);
 
         /* plan the query */
@@ -1647,15 +1622,13 @@ ExplainOneQuery(Query *query, int cursorOptions,
         INSTR_TIME_SET_CURRENT(planduration);
         INSTR_TIME_SUBTRACT(planduration, planstart);
 
-#if PG_VERSION_NUM >= PG_VERSION_13
         /* calc differences of buffer counters. */
         if (es->buffers)
         {
             memset(&bufusage, 0, sizeof(BufferUsage));
             BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
         }
-#endif
 
         /* run it (if needed) and produce output */
         ExplainOnePlanCompat(plan, into, es, queryString, params, queryEnv,
                              &planduration, (es->buffers ? &bufusage : NULL));
@@ -1696,10 +1669,10 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es
     if (es->buffers)
         instrument_option |= INSTRUMENT_BUFFERS;
-#if PG_VERSION_NUM >= PG_VERSION_13
+
     if (es->wal)
         instrument_option |= INSTRUMENT_WAL;
-#endif
+
     /*
      * We always collect timing for the entire statement, even when node-level
      * timing is off, so we don't look at es->timing here. (We could skip


@@ -1490,9 +1490,7 @@ MasterExtendedOpNode(MultiExtendedOp *originalOpNode,
     masterExtendedOpNode->hasDistinctOn = originalOpNode->hasDistinctOn;
     masterExtendedOpNode->limitCount = originalOpNode->limitCount;
     masterExtendedOpNode->limitOffset = originalOpNode->limitOffset;
-#if PG_VERSION_NUM >= PG_VERSION_13
     masterExtendedOpNode->limitOption = originalOpNode->limitOption;
-#endif
     masterExtendedOpNode->havingQual = newHavingQual;
 
     if (!extendedOpNodeProperties->onlyPushableWindowFunctions)
@@ -2489,14 +2487,12 @@ WorkerExtendedOpNode(MultiExtendedOp *originalOpNode,
     workerExtendedOpNode->windowClause = queryWindowClause.workerWindowClauseList;
     workerExtendedOpNode->sortClauseList = queryOrderByLimit.workerSortClauseList;
     workerExtendedOpNode->limitCount = queryOrderByLimit.workerLimitCount;
-#if PG_VERSION_NUM >= PG_VERSION_13
 
     /*
      * If the limitCount cannot be pushed down it will be NULL, so the deparser will
      * ignore the limitOption.
      */
     workerExtendedOpNode->limitOption = originalOpNode->limitOption;
-#endif
 
     return workerExtendedOpNode;
 }


@@ -1734,9 +1734,7 @@ MultiExtendedOpNode(Query *queryTree, Query *originalQuery)
     extendedOpNode->sortClauseList = queryTree->sortClause;
     extendedOpNode->limitCount = queryTree->limitCount;
     extendedOpNode->limitOffset = queryTree->limitOffset;
-#if PG_VERSION_NUM >= PG_VERSION_13
     extendedOpNode->limitOption = queryTree->limitOption;
-#endif
     extendedOpNode->havingQual = queryTree->havingQual;
     extendedOpNode->distinctClause = queryTree->distinctClause;
     extendedOpNode->hasDistinctOn = queryTree->hasDistinctOn;


@@ -230,9 +230,7 @@ static List * FetchEqualityAttrNumsForRTEOpExpr(OpExpr *opExpr);
 static List * FetchEqualityAttrNumsForRTEBoolExpr(BoolExpr *boolExpr);
 static List * FetchEqualityAttrNumsForList(List *nodeList);
 static int PartitionColumnIndex(Var *targetVar, List *targetList);
-#if PG_VERSION_NUM >= PG_VERSION_13
 static List * GetColumnOriginalIndexes(Oid relationId);
-#endif
 
 
 /*
@@ -541,9 +539,6 @@ BuildJobQuery(MultiNode *multiNode, List *dependentJobList)
     List *sortClauseList = NIL;
     Node *limitCount = NULL;
     Node *limitOffset = NULL;
-#if PG_VERSION_NUM >= PG_VERSION_13
     LimitOption limitOption = LIMIT_OPTION_DEFAULT;
-#endif
     Node *havingQual = NULL;
     bool hasDistinctOn = false;
     List *distinctClause = NIL;
@@ -625,9 +621,7 @@ BuildJobQuery(MultiNode *multiNode, List *dependentJobList)
         limitCount = extendedOp->limitCount;
         limitOffset = extendedOp->limitOffset;
-#if PG_VERSION_NUM >= PG_VERSION_13
         limitOption = extendedOp->limitOption;
-#endif
         sortClauseList = extendedOp->sortClauseList;
         havingQual = extendedOp->havingQual;
     }
@@ -683,9 +677,7 @@ BuildJobQuery(MultiNode *multiNode, List *dependentJobList)
     jobQuery->groupClause = groupClauseList;
     jobQuery->limitOffset = limitOffset;
     jobQuery->limitCount = limitCount;
-#if PG_VERSION_NUM >= PG_VERSION_13
     jobQuery->limitOption = limitOption;
-#endif
     jobQuery->havingQual = havingQual;
     jobQuery->hasAggs = contain_aggs_of_level((Node *) targetList, 0) ||
                         contain_aggs_of_level((Node *) havingQual, 0);
@@ -1338,8 +1330,6 @@ static void
 SetJoinRelatedColumnsCompat(RangeTblEntry *rangeTableEntry, Oid leftRelId, Oid rightRelId,
                             List *leftColumnVars, List *rightColumnVars)
 {
-#if PG_VERSION_NUM >= PG_VERSION_13
-
     /* We don't have any merged columns so set it to 0 */
     rangeTableEntry->joinmergedcols = 0;
@@ -1362,13 +1352,9 @@ SetJoinRelatedColumnsCompat(RangeTblEntry *rangeTableEntry, Oid leftRelId, Oid r
         int rightColsSize = list_length(rightColumnVars);
         rangeTableEntry->joinrightcols = GeneratePositiveIntSequenceList(rightColsSize);
     }
-#endif
 }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
-
 /*
  * GetColumnOriginalIndexes gets the original indexes of columns by taking column drops into account.
  */
@@ -1392,8 +1378,6 @@ GetColumnOriginalIndexes(Oid relationId)
 }
 
-#endif
-
 /*
  * ExtractRangeTableId gets the range table id from a node that could
  * either be a JoinExpr or RangeTblRef.


@@ -167,10 +167,7 @@ static bool FindQueryContainingRTEIdentityInternal(Node *node,
                                                    FindQueryContainingRteIdentityContext *
                                                    context);
 
-#if PG_VERSION_NUM >= PG_VERSION_13
 static int ParentCountPriorToAppendRel(List *appendRelList, AppendRelInfo *appendRelInfo);
-#endif
 
 
 /*
@@ -398,12 +395,10 @@ SafeToPushdownUnionSubquery(Query *originalQuery,
 /*
  * RangeTableOffsetCompat returns the range table offset(in glob->finalrtable) for the appendRelInfo.
- * For PG < 13 this is a no op.
  */
 static int
 RangeTableOffsetCompat(PlannerInfo *root, AppendRelInfo *appendRelInfo)
 {
-#if PG_VERSION_NUM >= PG_VERSION_13
     int parentCount = ParentCountPriorToAppendRel(root->append_rel_list, appendRelInfo);
     int skipParentCount = parentCount - 1;
@@ -434,9 +429,6 @@ RangeTableOffsetCompat(PlannerInfo *root, AppendRelInfo *appendRelInfo)
      */
     int parentRelIndex = appendRelInfo->parent_relid - 1;
     return parentRelIndex - indexInRtable;
-#else
-    return 0;
-#endif
 }
@@ -1482,8 +1474,6 @@ AddUnionAllSetOperationsToAttributeEquivalenceClass(AttributeEquivalenceClass *
 }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
-
 /*
  * ParentCountPriorToAppendRel returns the number of parents that come before
 * the given append rel info.
 */
@@ -1506,8 +1496,6 @@ ParentCountPriorToAppendRel(List *appendRelList, AppendRelInfo *targetAppendRelI
 }
 
-#endif
-
 /*
  * AddUnionSetOperationsToAttributeEquivalenceClass recursively iterates on all the
 * setOperations and adds each corresponding target entry to the given equivalence


@@ -112,7 +112,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
             break;
         }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
         case T_AlterStatsStmt:
         {
             AlterStatsStmt *alterStatsStmt = (AlterStatsStmt *) parseTree;
@@ -124,7 +123,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
             break;
         }
 
-#endif
         case T_AlterTableStmt:
         {


@@ -17,13 +17,9 @@
 #include "distributed/pg_version_constants.h"
 
-#if PG_VERSION_NUM >= PG_VERSION_12
 #include "access/genam.h"
-#endif
 
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "postmaster/interrupt.h"
-#endif
 
 #include "access/htup_details.h"
 #include "access/sysattr.h"
@@ -2039,13 +2035,11 @@ WaitForMiliseconds(long timeout)
         CHECK_FOR_INTERRUPTS();
     }
 
-#if PG_VERSION_NUM >= PG_VERSION_13
     if (ConfigReloadPending)
     {
         ConfigReloadPending = false;
         ProcessConfigFile(PGC_SIGHUP);
     }
-#endif
 }


@@ -29,9 +29,7 @@
 #include "distributed/metadata_cache.h"
 #include "distributed/relation_access_tracking.h"
 #include "utils/hsearch.h"
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "common/hashfn.h"
-#endif
 #include "utils/lsyscache.h"


@@ -323,9 +323,7 @@ OutMultiExtendedOp(OUTFUNC_ARGS)
     WRITE_NODE_FIELD(sortClauseList);
     WRITE_NODE_FIELD(limitCount);
     WRITE_NODE_FIELD(limitOffset);
-#if PG_VERSION_NUM >= PG_VERSION_13
     WRITE_ENUM_FIELD(limitOption, LimitOption);
-#endif
     WRITE_NODE_FIELD(havingQual);
     WRITE_BOOL_FIELD(hasDistinctOn);
     WRITE_NODE_FIELD(distinctClause);


@@ -29,9 +29,7 @@
 #include "storage/lockdefs.h"
 #include "utils/fmgroids.h"
 #include "utils/hsearch.h"
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "common/hashfn.h"
-#endif
 #include "utils/inval.h"
 #include "utils/memutils.h"


@@ -53,9 +53,7 @@
 #include "storage/lmgr.h"
 #include "storage/lwlock.h"
 #include "tcop/tcopprot.h"
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "common/hashfn.h"
-#endif
 #include "utils/memutils.h"
 #include "utils/lsyscache.h"


@@ -8,9 +8,7 @@
 #include "distributed/pg_version_constants.h"
 
-#if PG_VERSION_NUM >= PG_VERSION_13
 #include "common/hashfn.h"
-#endif
 
 #include "commands/dbcommands.h"
 #include "distributed/citus_custom_scan.h"


@ -18,9 +18,6 @@
#include "access/xact.h" #include "access/xact.h"
#include "catalog/dependency.h" #include "catalog/dependency.h"
#include "catalog/pg_depend.h" #include "catalog/pg_depend.h"
#if PG_VERSION_NUM < PG_VERSION_13
#include "catalog/pg_depend_d.h"
#endif
#include "catalog/pg_foreign_server.h" #include "catalog/pg_foreign_server.h"
#include "distributed/citus_ruleutils.h" #include "distributed/citus_ruleutils.h"
#include "distributed/distribution_column.h" #include "distributed/distribution_column.h"
@ -43,10 +40,6 @@ PG_FUNCTION_INFO_V1(worker_drop_shell_table);
PG_FUNCTION_INFO_V1(worker_drop_sequence_dependency); PG_FUNCTION_INFO_V1(worker_drop_sequence_dependency);
static void WorkerDropDistributedTable(Oid relationId); static void WorkerDropDistributedTable(Oid relationId);
#if PG_VERSION_NUM < PG_VERSION_13
static long deleteDependencyRecordsForSpecific(Oid classId, Oid objectId, char deptype,
Oid refclassId, Oid refobjectId);
#endif
/* /*
@ -131,11 +124,7 @@ WorkerDropDistributedTable(Oid relationId)
ObjectAddressSet(*distributedTableObject, RelationRelationId, relationId); ObjectAddressSet(*distributedTableObject, RelationRelationId, relationId);
/* Drop dependent sequences from pg_dist_object */ /* Drop dependent sequences from pg_dist_object */
#if PG_VERSION_NUM >= PG_VERSION_13
List *ownedSequences = getOwnedSequences(relationId); List *ownedSequences = getOwnedSequences(relationId);
#else
List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber);
#endif
Oid ownedSequenceOid = InvalidOid; Oid ownedSequenceOid = InvalidOid;
foreach_oid(ownedSequenceOid, ownedSequences) foreach_oid(ownedSequenceOid, ownedSequences)
@ -247,11 +236,7 @@ worker_drop_shell_table(PG_FUNCTION_ARGS)
} }
/* Drop dependent sequences from pg_dist_object */ /* Drop dependent sequences from pg_dist_object */
#if PG_VERSION_NUM >= PG_VERSION_13
List *ownedSequences = getOwnedSequences(relationId); List *ownedSequences = getOwnedSequences(relationId);
#else
List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber);
#endif
Oid ownedSequenceOid = InvalidOid; Oid ownedSequenceOid = InvalidOid;
foreach_oid(ownedSequenceOid, ownedSequences) foreach_oid(ownedSequenceOid, ownedSequences)
@ -299,11 +284,7 @@ worker_drop_sequence_dependency(PG_FUNCTION_ARGS)
EnsureTableOwner(relationId); EnsureTableOwner(relationId);
/* break the dependent sequences from the table */ /* break the dependent sequences from the table */
#if PG_VERSION_NUM >= PG_VERSION_13
List *ownedSequences = getOwnedSequences(relationId); List *ownedSequences = getOwnedSequences(relationId);
#else
List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber);
#endif
Oid ownedSequenceOid = InvalidOid; Oid ownedSequenceOid = InvalidOid;
foreach_oid(ownedSequenceOid, ownedSequences) foreach_oid(ownedSequenceOid, ownedSequences)
@ -322,59 +303,3 @@ worker_drop_sequence_dependency(PG_FUNCTION_ARGS)
PG_RETURN_VOID(); PG_RETURN_VOID();
} }
/* *INDENT-OFF* */
#if PG_VERSION_NUM < PG_VERSION_13
/*
* This function is already available on PG 13+.
* deleteDependencyRecordsForSpecific -- delete all records with given depender
* classId/objectId, dependee classId/objectId, of the given deptype.
* Returns the number of records deleted.
*/
static long
deleteDependencyRecordsForSpecific(Oid classId, Oid objectId, char deptype,
Oid refclassId, Oid refobjectId)
{
long count = 0;
Relation depRel;
ScanKeyData key[2];
HeapTuple tup;
depRel = table_open(DependRelationId, RowExclusiveLock);
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classId));
ScanKeyInit(&key[1],
Anum_pg_depend_objid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(objectId));
SysScanDesc scan =
systable_beginscan(depRel, DependDependerIndexId, true,
NULL, 2, key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup);
if (depform->refclassid == refclassId &&
depform->refobjid == refobjectId &&
depform->deptype == deptype)
{
CatalogTupleDelete(depRel, &tup->t_self);
count++;
}
}
systable_endscan(scan);
table_close(depRel, RowExclusiveLock);
return count;
}
#endif
/* *INDENT-ON* */
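
With PG13 as the minimum, the call sites above collapse onto the server's own catalog helpers: the one-argument getOwnedSequences() and the built-in deleteDependencyRecordsForSpecific() that the removed backport duplicated. A minimal sketch of the resulting shape (the function name is hypothetical, not part of this diff):

#include "postgres.h"
#include "catalog/dependency.h"
#include "catalog/pg_class.h"
#include "distributed/listutils.h"

/* sketch: detach a table's owned sequences by deleting the AUTO dependencies */
static void
BreakOwnedSequenceDependencies(Oid relationId)
{
    List *ownedSequences = getOwnedSequences(relationId);  /* one-arg on PG13+ */
    Oid ownedSequenceOid = InvalidOid;

    foreach_oid(ownedSequenceOid, ownedSequences)
    {
        /* provided by the server since PG13; no backport needed anymore */
        deleteDependencyRecordsForSpecific(RelationRelationId, ownedSequenceOid,
                                           DEPENDENCY_AUTO,
                                           RelationRelationId, relationId);
    }
}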

View File

@ -50,8 +50,4 @@
#define ExplainPropertyLong(qlabel, value, es) \ #define ExplainPropertyLong(qlabel, value, es) \
ExplainPropertyInteger(qlabel, NULL, value, es) ExplainPropertyInteger(qlabel, NULL, value, es)
#if PG_VERSION_NUM < 130000
#define detoast_attr(X) heap_tuple_untoast_attr(X)
#endif
#endif /* COLUMNAR_COMPAT_H */ #endif /* COLUMNAR_COMPAT_H */
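
The dropped define only existed to spell PG13's detoast_attr() on PG12, where the routine was still heap_tuple_untoast_attr() in access/tuptoaster.h. A minimal sketch of a PG13+ caller (the wrapper name is hypothetical):

#include "postgres.h"
#include "access/detoast.h"

/* sketch: fully detoast a possibly compressed or out-of-line value */
static struct varlena *
DetoastColumnarValue(struct varlena *value)
{
    return detoast_attr(value);  /* heap_tuple_untoast_attr() before PG13 */
}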

View File

@ -199,16 +199,10 @@ typedef struct CitusCustomScanPath
} CitusCustomScanPath; } CitusCustomScanPath;
#if PG_VERSION_NUM >= PG_VERSION_13
extern PlannedStmt * distributed_planner(Query *parse, extern PlannedStmt * distributed_planner(Query *parse,
const char *query_string, const char *query_string,
int cursorOptions, int cursorOptions,
ParamListInfo boundParams); ParamListInfo boundParams);
#else
extern PlannedStmt * distributed_planner(Query *parse,
int cursorOptions,
ParamListInfo boundParams);
#endif
/* /*
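
PG13 started handing the original query text to the planner hook, so the declaration keeps the four-argument form unconditionally. A minimal sketch of a hook with the PG13+ signature (names are illustrative; previous-hook chaining and PG_MODULE_MAGIC boilerplate omitted):

#include "postgres.h"
#include "nodes/params.h"
#include "optimizer/planner.h"

static PlannedStmt *
sketch_planner(Query *parse, const char *query_string,
               int cursorOptions, ParamListInfo boundParams)
{
    /* a real hook would inspect the query before delegating */
    return standard_planner(parse, query_string, cursorOptions, boundParams);
}

void
_PG_init(void)
{
    planner_hook = sketch_planner;  /* PG13+ hook type matches this signature */
}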

View File

@ -145,10 +145,7 @@ typedef struct ListCellAndListWrapper
* *
* For more information, see postgres commit with sha * For more information, see postgres commit with sha
* 1cff1b95ab6ddae32faa3efe0d95a820dbfdc164 * 1cff1b95ab6ddae32faa3efe0d95a820dbfdc164
*/ *
#if PG_VERSION_NUM >= PG_VERSION_13
/*
* How it works: * How it works:
* - An index is declared with the name {var}PositionDoNotUse and used * - An index is declared with the name {var}PositionDoNotUse and used
* throughout the for loop using ## to concat. * throughout the for loop using ## to concat.
@ -162,9 +159,6 @@ typedef struct ListCellAndListWrapper
(var ## PositionDoNotUse) < list_length(l) && \ (var ## PositionDoNotUse) < list_length(l) && \
(((var) = list_nth(l, var ## PositionDoNotUse)) || true); \ (((var) = list_nth(l, var ## PositionDoNotUse)) || true); \
var ## PositionDoNotUse ++) var ## PositionDoNotUse ++)
#else
#define foreach_ptr_append(var, l) foreach_ptr(var, l)
#endif
/* utility functions declaration shared within this module */ /* utility functions declaration shared within this module */
extern List * SortList(List *pointerList, extern List * SortList(List *pointerList,
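
Because the position index is re-checked against list_length() on every iteration, foreach_ptr_append() keeps working when the loop body appends to the very list being walked — something ListCell-based iteration cannot guarantee on the array-backed PG13 List (see the commit sha referenced above). A usage sketch (Task, ExpandTask(), and the function itself are hypothetical):

#include "postgres.h"
#include "nodes/pg_list.h"
#include "distributed/listutils.h"

/* sketch: grow the list safely while iterating over it */
static List *
ExpandAllTasks(List *taskList)
{
    Task *task = NULL;
    foreach_ptr_append(task, taskList)
    {
        Task *subTask = ExpandTask(task);  /* hypothetical helper */
        if (subTask != NULL)
        {
            /* safe: list_length(taskList) is re-read on the next iteration */
            taskList = lappend(taskList, subTask);
        }
    }
    return taskList;
}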

View File

@ -178,9 +178,7 @@ typedef struct MultiExtendedOp
List *sortClauseList; List *sortClauseList;
Node *limitCount; Node *limitCount;
Node *limitOffset; Node *limitOffset;
#if PG_VERSION_NUM >= PG_VERSION_13
LimitOption limitOption; LimitOption limitOption;
#endif
Node *havingQual; Node *havingQual;
List *distinctClause; List *distinctClause;
List *windowClause; List *windowClause;
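
The limitOption field stays unconditionally because PG13 introduced FETCH FIRST ... WITH TIES, and the logical optimizer must know which limit semantics it is pushing around. A hedged sketch of reading it (the helper name is hypothetical; LimitOption comes from nodes/nodes.h, MultiExtendedOp is the struct above):

#include "postgres.h"
#include "nodes/nodes.h"

/* sketch: WITH TIES changes whether a plain LIMIT pushdown is equivalent */
static bool
LimitUsesWithTies(MultiExtendedOp *opNode)
{
    return opNode->limitCount != NULL &&
           opNode->limitOption == LIMIT_OPTION_WITH_TIES;
}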

View File

@ -11,7 +11,6 @@
#ifndef PG_VERSION_CONSTANTS #ifndef PG_VERSION_CONSTANTS
#define PG_VERSION_CONSTANTS #define PG_VERSION_CONSTANTS
#define PG_VERSION_12 120000
#define PG_VERSION_13 130000 #define PG_VERSION_13 130000
#define PG_VERSION_14 140000 #define PG_VERSION_14 140000
#define PG_VERSION_15 150000 #define PG_VERSION_15 150000

View File

@ -24,14 +24,10 @@
#include "parser/parse_func.h" #include "parser/parse_func.h"
#include "optimizer/optimizer.h" #include "optimizer/optimizer.h"
#if (PG_VERSION_NUM >= PG_VERSION_13)
#include "tcop/tcopprot.h" #include "tcop/tcopprot.h"
#endif
#include "pg_version_compat.h" #include "pg_version_compat.h"
#if PG_VERSION_NUM >= PG_VERSION_12
typedef struct typedef struct
{ {
File fd; File fd;
@ -76,6 +72,4 @@ FileCompatFromFileStart(File fileDesc)
} }
#endif /* PG12 */
#endif /* VERSION_COMPAT_H */ #endif /* VERSION_COMPAT_H */
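
The FileCompat wrapper itself outlives the cleanup; only its PG12 version guard goes away. A minimal usage sketch, assuming (as the struct above suggests) that the wrapper tracks the I/O position alongside the fd; the path handling and helper name are hypothetical:

#include "postgres.h"
#include <fcntl.h>
#include "storage/fd.h"

/* sketch: open a file and wrap it so reads start from offset zero */
static FileCompat
OpenWrappedFile(const char *path)
{
    File fd = PathNameOpenFile(path, O_RDONLY | PG_BINARY);
    return FileCompatFromFileStart(fd);
}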

View File

@ -123,7 +123,6 @@ RelationGetSmgr(Relation rel)
#define ROLE_PG_READ_ALL_STATS DEFAULT_ROLE_READ_ALL_STATS #define ROLE_PG_READ_ALL_STATS DEFAULT_ROLE_READ_ALL_STATS
#endif #endif
#if PG_VERSION_NUM >= PG_VERSION_13
#define lnext_compat(l, r) lnext(l, r) #define lnext_compat(l, r) lnext(l, r)
#define list_delete_cell_compat(l, c, p) list_delete_cell(l, c) #define list_delete_cell_compat(l, c, p) list_delete_cell(l, c)
#define pg_plan_query_compat(p, q, c, b) pg_plan_query(p, q, c, b) #define pg_plan_query_compat(p, q, c, b) pg_plan_query(p, q, c, b)
@ -137,23 +136,6 @@ RelationGetSmgr(Relation rel)
#define SetListCellPtr(a, b) ((a)->ptr_value = (b)) #define SetListCellPtr(a, b) ((a)->ptr_value = (b))
#define RangeTableEntryFromNSItem(a) ((a)->p_rte) #define RangeTableEntryFromNSItem(a) ((a)->p_rte)
#define QueryCompletionCompat QueryCompletion #define QueryCompletionCompat QueryCompletion
#else /* pre PG13 */
#define lnext_compat(l, r) lnext(r)
#define list_delete_cell_compat(l, c, p) list_delete_cell(l, c, p)
#define pg_plan_query_compat(p, q, c, b) pg_plan_query(p, c, b)
#define planner_compat(p, c, b) planner(p, c, b)
#define standard_planner_compat(a, c, d) standard_planner(a, c, d)
#define CMDTAG_SELECT_COMPAT "SELECT"
#define GetSequencesOwnedByRelation(a) getOwnedSequences(a, InvalidAttrNumber)
#define GetSequencesOwnedByColumn(a, b) getOwnedSequences(a, b)
#define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) ExplainOnePlan(a, b, c, d, e, f, g)
#define SetListCellPtr(a, b) ((a)->data.ptr_value = (b))
#define RangeTableEntryFromNSItem(a) (a)
#define QueryCompletionCompat char
#define varattnosyn varoattno
#define varnosyn varnoold
#endif
#if PG_VERSION_NUM >= PG_VERSION_12
#define CreateTableSlotForRel(rel) table_slot_create(rel, NULL) #define CreateTableSlotForRel(rel) table_slot_create(rel, NULL)
#define MakeSingleTupleTableSlotCompat MakeSingleTupleTableSlot #define MakeSingleTupleTableSlotCompat MakeSingleTupleTableSlot
@ -172,8 +154,6 @@ RelationGetSmgr(Relation rel)
#define fcSetArgExt(fc, n, val, is_null) \ #define fcSetArgExt(fc, n, val, is_null) \
(((fc)->args[n].isnull = (is_null)), ((fc)->args[n].value = (val))) (((fc)->args[n].isnull = (is_null)), ((fc)->args[n].value = (val)))
#endif /* PG12 */
#define fcSetArg(fc, n, value) fcSetArgExt(fc, n, value, false) #define fcSetArg(fc, n, value) fcSetArgExt(fc, n, value, false)
#define fcSetArgNull(fc, n) fcSetArgExt(fc, n, (Datum) 0, true) #define fcSetArgNull(fc, n) fcSetArgExt(fc, n, (Datum) 0, true)
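
With the #else branch gone, every remaining *_compat macro is a straight pass-through to the PG13+ spelling (lnext_compat(l, r) is exactly lnext(l, r), and so on), and the pre-13 Var field names varnoold/varoattno disappear for good. A small sketch of code that now always uses the PG13 names (the helper is hypothetical):

#include "postgres.h"
#include "nodes/primnodes.h"

/* sketch: the identity fields are always varnosyn/varattnosyn on PG13+ */
static void
CopyVarIdentity(const Var *source, Var *target)
{
    target->varnosyn = source->varnosyn;        /* was varnoold before PG13 */
    target->varattnosyn = source->varattnosyn;  /* was varoattno before PG13 */
}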

View File

@ -75,9 +75,6 @@ s/(job_[0-9]+\/task_[0-9]+\/p_[0-9]+\.)[0-9]+/\1xxxx/g
# isolation_ref2ref_foreign_keys # isolation_ref2ref_foreign_keys
s/"(ref_table_[0-9]_|ref_table_[0-9]_value_fkey_)[0-9]+"/"\1xxxxxxx"/g s/"(ref_table_[0-9]_|ref_table_[0-9]_value_fkey_)[0-9]+"/"\1xxxxxxx"/g
# pg11/pg12 varies in isolation debug output
s/s1: DEBUG:/DEBUG:/g
# commands cascading to shard relations # commands cascading to shard relations
s/(NOTICE: .*_)[0-9]{5,}( CASCADE)/\1xxxxx\2/g s/(NOTICE: .*_)[0-9]{5,}( CASCADE)/\1xxxxx\2/g
s/(NOTICE: [a-z]+ cascades to table ".*)_[0-9]{5,}"/\1_xxxxx"/g s/(NOTICE: [a-z]+ cascades to table ".*)_[0-9]{5,}"/\1_xxxxx"/g
@ -93,30 +90,15 @@ s/connectionId: [0-9]+/connectionId: xxxxxxx/g
s/ *$//g s/ *$//g
# pg12 changes # pg12 changes
s/Partitioned table "/Table "/g
s/\) TABLESPACE pg_default$/\)/g
s/invalid input syntax for type bigint:/invalid input syntax for integer:/g
s/invalid input syntax for type /invalid input syntax for /g
s/_id_ref_id_fkey/_id_fkey/g
s/_ref_id_id_fkey_/_ref_id_fkey_/g
s/fk_test_2_col1_col2_fkey/fk_test_2_col1_fkey/g
s/_id_other_column_ref_fkey/_id_fkey/g
s/"(collections_list_|collection_users_|collection_users_fkey_)[0-9]+"/"\1xxxxxxx"/g s/"(collections_list_|collection_users_|collection_users_fkey_)[0-9]+"/"\1xxxxxxx"/g
# pg13 changes # pg13 changes
s/of relation ".*" violates not-null constraint/violates not-null constraint/g s/of relation ".*" violates not-null constraint/violates not-null constraint/g
s/varnosyn/varnoold/g
s/varattnosyn/varoattno/g
/DEBUG: index ".*" can safely use deduplication.*$/d /DEBUG: index ".*" can safely use deduplication.*$/d
/DEBUG: index ".*" cannot use deduplication.*$/d /DEBUG: index ".*" cannot use deduplication.*$/d
/DEBUG: building index ".*" on table ".*" serially.*$/d /DEBUG: building index ".*" on table ".*" serially.*$/d
s/partition ".*" would be violated by some row/partition would be violated by some row/g s/partition ".*" would be violated by some row/partition would be violated by some row/g
/.*Peak Memory Usage:.*$/d
s/of relation ".*" contains null values/contains null values/g s/of relation ".*" contains null values/contains null values/g
s/of relation "t1" is violated by some row/is violated by some row/g
# pg13.1 changes
s/^ERROR: insufficient columns in PRIMARY KEY constraint definition$/ERROR: unique constraint on partitioned table must include all partitioning columns/g
# intermediate_results # intermediate_results
s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g
@ -157,21 +139,6 @@ s/Citus.*currently supports/Citus currently supports/g
s/prepared transaction with identifier .* does not exist/prepared transaction with identifier "citus_x_yyyyyy_zzz_w" does not exist/g s/prepared transaction with identifier .* does not exist/prepared transaction with identifier "citus_x_yyyyyy_zzz_w" does not exist/g
s/failed to roll back prepared transaction '.*'/failed to roll back prepared transaction 'citus_x_yyyyyy_zzz_w'/g s/failed to roll back prepared transaction '.*'/failed to roll back prepared transaction 'citus_x_yyyyyy_zzz_w'/g
# Table aliases for partitioned tables in explain outputs might change
# regardless of whether postgres appended an _int suffix to the alias, we always append an _xxx suffix
# Can be removed when we remove support for pg11 and pg12.
# "-> <scanMethod> Scan on <tableName>_<partitionId>_<shardId> <tableName>_<aliasId>" and
# "-> <scanMethod> Scan on <tableName>_<partitionId>_<shardId> <tableName>" becomes
# "-> <scanMethod> Scan on <tableName>_<partitionId>_<shardId> <tableName>_xxx"
s/(->.*Scan on\ +)(.*)(_[0-9]+)(_[0-9]+) \2(_[0-9]+|_xxx)?/\1\2\3\4 \2_xxx/g
# Table aliases for partitioned tables in "Hash Cond:" lines of explain outputs might change
# This is only for multi_partitioning.sql test file
# regardless of whether postgres appended an _int suffix to the alias, we always append an _xxx suffix
# Can be removed when we remove support for pg11 and pg12.
s/(partitioning_hash_join_test)(_[0-9]|_xxx)?(\.[a-zA-Z]+)/\1_xxx\3/g
s/(partitioning_hash_test)(_[0-9]|_xxx)?(\.[a-zA-Z]+)/\1_xxx\3/g
# Errors with binary decoding where OIDs should be normalized # Errors with binary decoding where OIDs should be normalized
s/wrong data type: [0-9]+, expected [0-9]+/wrong data type: XXXX, expected XXXX/g s/wrong data type: [0-9]+, expected [0-9]+/wrong data type: XXXX, expected XXXX/g

View File

@ -1,11 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven;
server_version_above_eleven
---------------------------------------------------------------------
t
(1 row)
\gset
CREATE SCHEMA alter_distributed_table; CREATE SCHEMA alter_distributed_table;
SET search_path TO alter_distributed_table; SET search_path TO alter_distributed_table;
SET citus.shard_count TO 4; SET citus.shard_count TO 4;
@ -469,7 +461,6 @@ SELECT alter_distributed_table('col_with_ref_to_dist', shard_count:=6, cascade_t
(1 row) (1 row)
\if :server_version_above_eleven
-- test altering columnar table -- test altering columnar table
CREATE TABLE columnar_table (a INT) USING columnar; CREATE TABLE columnar_table (a INT) USING columnar;
SELECT create_distributed_table('columnar_table', 'a', colocate_with:='none'); SELECT create_distributed_table('columnar_table', 'a', colocate_with:='none');
@ -496,7 +487,6 @@ SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHE
columnar_table | 6 | columnar columnar_table | 6 | columnar
(1 row) (1 row)
\endif
-- test complex cascade operations -- test complex cascade operations
CREATE TABLE cas_1 (a INT UNIQUE); CREATE TABLE cas_1 (a INT UNIQUE);
CREATE TABLE cas_2 (a INT UNIQUE); CREATE TABLE cas_2 (a INT UNIQUE);

View File

@ -12,13 +12,6 @@ NOTICE: renaming the new table to public.alter_am_pg_version_table
(1 row) (1 row)
DROP TABLE alter_am_pg_version_table; DROP TABLE alter_am_pg_version_table;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
\gset
\if :server_version_above_eleven
\else
\q
\endif
CREATE SCHEMA alter_table_set_access_method; CREATE SCHEMA alter_table_set_access_method;
SET search_path TO alter_table_set_access_method; SET search_path TO alter_table_set_access_method;
SET citus.shard_count TO 4; SET citus.shard_count TO 4;

View File

@ -641,7 +641,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'citus_local_table_4'::regclass;
SELECT column_name_to_column('citus_local_table_4', 'a'); SELECT column_name_to_column('citus_local_table_4', 'a');
column_name_to_column column_name_to_column
--------------------------------------------------------------------- ---------------------------------------------------------------------
{VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
(1 row) (1 row)
SELECT master_update_shard_statistics(shardid) SELECT master_update_shard_statistics(shardid)

View File

@ -769,8 +769,8 @@ SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition
ORDER BY logicalrelid; ORDER BY logicalrelid;
logicalrelid | partmethod | partkey logicalrelid | partmethod | partkey
--------------------------------------------------------------------- ---------------------------------------------------------------------
parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location -1} parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 5 :location -1}
(2 rows) (2 rows)
-- some tests for view propagation on citus local tables -- some tests for view propagation on citus local tables

View File

@ -1,14 +1,6 @@
-- --
-- Test the TRUNCATE TABLE command for columnar tables. -- Test the TRUNCATE TABLE command for columnar tables.
-- --
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
---------------------------------------------------------------------
t
(1 row)
-- CREATE a columnar table, fill with some data -- -- CREATE a columnar table, fill with some data --
CREATE TABLE columnar_truncate_test (a int, b int) USING columnar; CREATE TABLE columnar_truncate_test (a int, b int) USING columnar;
CREATE TABLE columnar_truncate_test_second (a int, b int) USING columnar; CREATE TABLE columnar_truncate_test_second (a int, b int) USING columnar;

View File

@ -74,7 +74,7 @@ INSERT INTO test_lseg VALUES ('( 1 , 2 ) , ( 3 , 4 )');
SELECT minimum_value, maximum_value FROM columnar.chunk; SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value minimum_value | maximum_value
--------------------------------------------------------------------- ---------------------------------------------------------------------
| |
(1 row) (1 row)
SELECT * FROM test_lseg WHERE a = '( 1 , 2 ) , ( 3 , 4 )'; SELECT * FROM test_lseg WHERE a = '( 1 , 2 ) , ( 3 , 4 )';
@ -151,13 +151,6 @@ SELECT * FROM test_user_defined_color WHERE a = 'red';
DROP TABLE test_user_defined_color; DROP TABLE test_user_defined_color;
DROP TYPE user_defined_color; DROP TYPE user_defined_color;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
\gset
\if :server_version_above_twelve
\else
\q
\endif
-- pg_snapshot -- pg_snapshot
CREATE TABLE test_pg_snapshot (a pg_snapshot) USING columnar; CREATE TABLE test_pg_snapshot (a pg_snapshot) USING columnar;
INSERT INTO test_pg_snapshot VALUES ('10:20:10,14,15'); INSERT INTO test_pg_snapshot VALUES ('10:20:10,14,15');

View File

@ -1,159 +0,0 @@
--
-- Testing data types without comparison operators
-- If a data type doesn't have comparison operators, we should store NULL for min/max values
-- Verify that (1) min/max entries in columnar.chunk is NULL as expected
-- (2) we can run queries which has equality conditions in WHERE clause for that column with correct results
--
-- varchar
CREATE TABLE test_varchar (a varchar) USING columnar;
INSERT INTO test_varchar VALUES ('Hello');
SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM test_varchar WHERE a = 'Hello';
a
---------------------------------------------------------------------
Hello
(1 row)
DROP TABLE test_varchar;
-- cidr
CREATE TABLE test_cidr (a cidr) USING columnar;
INSERT INTO test_cidr VALUES ('192.168.100.128/25');
SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM test_cidr WHERE a = '192.168.100.128/25';
a
---------------------------------------------------------------------
192.168.100.128/25
(1 row)
DROP TABLE test_cidr;
-- json
CREATE TABLE test_json (a json) USING columnar;
INSERT INTO test_json VALUES ('5'::json);
SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM test_json WHERE a::text = '5'::json::text;
a
---------------------------------------------------------------------
5
(1 row)
DROP TABLE test_json;
-- line
CREATE TABLE test_line (a line) USING columnar;
INSERT INTO test_line VALUES ('{1, 2, 3}');
SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM test_line WHERE a = '{1, 2, 3}';
a
---------------------------------------------------------------------
{1,2,3}
(1 row)
DROP TABLE test_line;
-- lseg
CREATE TABLE test_lseg (a lseg) USING columnar;
INSERT INTO test_lseg VALUES ('( 1 , 2 ) , ( 3 , 4 )');
SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM test_lseg WHERE a = '( 1 , 2 ) , ( 3 , 4 )';
a
---------------------------------------------------------------------
[(1,2),(3,4)]
(1 row)
DROP TABLE test_lseg;
-- path
CREATE TABLE test_path (a path) USING columnar;
INSERT INTO test_path VALUES ('( 1 , 2 ) , ( 3 , 4 ) , ( 5 , 6 )');
SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM test_path WHERE a = '( 1 , 2 ) , ( 3 , 4 ) , ( 5 , 6 )';
a
---------------------------------------------------------------------
((1,2),(3,4),(5,6))
(1 row)
DROP TABLE test_path;
-- txid_snapshot
CREATE TABLE test_txid_snapshot (a txid_snapshot) USING columnar;
INSERT INTO test_txid_snapshot VALUES ('10:20:10,14,15');
SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM test_txid_snapshot WHERE a::text = '10:20:10,14,15'::txid_snapshot::text;
a
---------------------------------------------------------------------
10:20:10,14,15
(1 row)
DROP TABLE test_txid_snapshot;
-- xml
CREATE TABLE test_xml (a xml) USING columnar;
INSERT INTO test_xml VALUES ('<foo>bar</foo>'::xml);
SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM test_xml WHERE a::text = '<foo>bar</foo>'::xml::text;
a
---------------------------------------------------------------------
<foo>bar</foo>
(1 row)
DROP TABLE test_xml;
-- user defined
CREATE TYPE user_defined_color AS ENUM ('red', 'orange', 'yellow',
'green', 'blue', 'purple');
CREATE TABLE test_user_defined_color (a user_defined_color) USING columnar;
INSERT INTO test_user_defined_color VALUES ('red');
SELECT minimum_value, maximum_value FROM columnar.chunk;
minimum_value | maximum_value
---------------------------------------------------------------------
|
(1 row)
SELECT * FROM test_user_defined_color WHERE a = 'red';
a
---------------------------------------------------------------------
red
(1 row)
DROP TABLE test_user_defined_color;
DROP TYPE user_defined_color;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
\gset
\if :server_version_above_twelve
\else
\q

View File

@ -9,15 +9,6 @@ SELECT create_distributed_table ('test_table', 'key');
(1 row) (1 row)
INSERT INTO test_table SELECT i % 10, 'test' || i, row_to_json(row(i, i*18, 'test' || i)) FROM generate_series (0, 100) i; INSERT INTO test_table SELECT i % 10, 'test' || i, row_to_json(row(i, i*18, 'test' || i)) FROM generate_series (0, 100) i;
-- server version because CTE inlining might produce
-- different debug messages in PG 11 vs PG 12
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 12;
?column?
---------------------------------------------------------------------
t
(1 row)
SET client_min_messages TO DEBUG; SET client_min_messages TO DEBUG;
-- Citus should not inline this CTE because otherwise it cannot -- Citus should not inline this CTE because otherwise it cannot
-- plan the query -- plan the query

View File

@ -561,7 +561,7 @@ SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', '$3'
ERROR: cannot distribute the function "eq_with_param_names" since the distribution argument is not valid ERROR: cannot distribute the function "eq_with_param_names" since the distribution argument is not valid
HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function()
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', '$1a'); SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', '$1a');
ERROR: invalid input syntax for integer: "1a" ERROR: invalid input syntax for type integer: "1a"
-- non existing column name -- non existing column name
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', 'aaa'); SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', 'aaa');
ERROR: cannot distribute the function "eq_with_param_names" since the distribution argument is not valid ERROR: cannot distribute the function "eq_with_param_names" since the distribution argument is not valid

View File

@ -1,10 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
version_above_eleven
---------------------------------------------------------------------
t
(1 row)
SET citus.next_shard_id TO 20040000; SET citus.next_shard_id TO 20040000;
CREATE SCHEMA xact_enum_type; CREATE SCHEMA xact_enum_type;
SET search_path TO xact_enum_type; SET search_path TO xact_enum_type;

View File

@ -1,6 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 12 AS have_table_am
\gset
\c - - - :master_port \c - - - :master_port
CREATE SCHEMA single_node; CREATE SCHEMA single_node;
SET search_path TO single_node; SET search_path TO single_node;
@ -390,11 +387,7 @@ SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y);
RESET citus.task_assignment_policy; RESET citus.task_assignment_policy;
-- Simple columnar follower test -- Simple columnar follower test
\c -reuse-previous=off regression - - :master_port \c -reuse-previous=off regression - - :master_port
\if :have_table_am
CREATE TABLE columnar_test (a int, b int) USING columnar; CREATE TABLE columnar_test (a int, b int) USING columnar;
\else
CREATE TABLE columnar_test (a int, b int);
\endif
INSERT INTO columnar_test(a, b) VALUES (1, 1); INSERT INTO columnar_test(a, b) VALUES (1, 1);
INSERT INTO columnar_test(a, b) VALUES (1, 2); INSERT INTO columnar_test(a, b) VALUES (1, 2);
TRUNCATE columnar_test; TRUNCATE columnar_test;

View File

@ -1,14 +1,6 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
\gset
\if :server_version_above_twelve
\else
\q
\endif
-- --
-- GRANT_ON_FOREIGN_SERVER_PROPAGATION -- GRANT_ON_FOREIGN_SERVER_PROPAGATION
-- We can't execute this file for PG12, as 'password_required' option for USER MAPPING -- 'password_required' option for USER MAPPING is introduced in PG13.
-- is introduced in PG13.
-- --
CREATE SCHEMA "grant on server"; CREATE SCHEMA "grant on server";
SET search_path TO "grant on server"; SET search_path TO "grant on server";

View File

@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
\gset
\if :server_version_above_twelve
\else
\q

View File

@ -124,7 +124,7 @@ SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series
(1 row) (1 row)
SELECT * FROM read_intermediate_result('squares', 'csv') AS res (x int, x2 int); SELECT * FROM read_intermediate_result('squares', 'csv') AS res (x int, x2 int);
ERROR: invalid input syntax for integer: "PGCOPY" ERROR: invalid input syntax for type integer: "PGCOPY"
END; END;
-- try a composite type -- try a composite type
CREATE TYPE intermediate_results.square_type AS (x text, x2 int); CREATE TYPE intermediate_results.square_type AS (x text, x2 int);

View File

@ -295,13 +295,13 @@ starting permutation: s1-begin s1-select-from-t1-with-subquery s2-begin s2-updat
step s1-begin: step s1-begin:
BEGIN; BEGIN;
DEBUG: Creating router plan s1: DEBUG: Creating router plan
step s1-select-from-t1-with-subquery: step s1-select-from-t1-with-subquery:
SET client_min_messages TO DEBUG2; SET client_min_messages TO DEBUG2;
SELECT * FROM (SELECT * FROM test_table_1_rf1 FOR UPDATE) foo WHERE id = 1; SELECT * FROM (SELECT * FROM test_table_1_rf1 FOR UPDATE) foo WHERE id = 1;
RESET client_min_messages; RESET client_min_messages;
DEBUG: query has a single distribution column value: 1 s1: DEBUG: query has a single distribution column value: 1
id|val_1 id|val_1
--------------------------------------------------------------------- ---------------------------------------------------------------------
1| 2 1| 2

View File

@ -543,7 +543,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text))
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
ERROR: invalid input syntax for integer: "non-int" ERROR: invalid input syntax for type integer: "non-int"
ROLLBACK; ROLLBACK;
-- shardMinValue should be smaller than shardMaxValue -- shardMinValue should be smaller than shardMaxValue
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;

View File

@ -70,7 +70,7 @@ SELECT partmethod, partkey FROM pg_dist_partition
WHERE logicalrelid = 'table_to_distribute'::regclass; WHERE logicalrelid = 'table_to_distribute'::regclass;
partmethod | partkey partmethod | partkey
--------------------------------------------------------------------- ---------------------------------------------------------------------
h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
(1 row) (1 row)
-- use a bad shard count -- use a bad shard count

View File

@ -2947,7 +2947,7 @@ Custom Scan (Citus Adaptive)
-> Insert on users_table_2_570028 citus_table_alias -> Insert on users_table_2_570028 citus_table_alias
Conflict Resolution: UPDATE Conflict Resolution: UPDATE
Conflict Arbiter Indexes: users_table_2_pkey_570028 Conflict Arbiter Indexes: users_table_2_pkey_570028
-> Seq Scan on users_table_2_570028 users_table_xxx -> Seq Scan on users_table_2_570028 users_table_2
EXPLAIN :default_analyze_flags execute p4(20,20); EXPLAIN :default_analyze_flags execute p4(20,20);
Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Task Count: 1 Task Count: 1
@ -2959,7 +2959,7 @@ Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
Conflict Arbiter Indexes: users_table_2_pkey_570028 Conflict Arbiter Indexes: users_table_2_pkey_570028
Tuples Inserted: 0 Tuples Inserted: 0
Conflicting Tuples: 0 Conflicting Tuples: 0
-> Seq Scan on users_table_2_570028 users_table_xxx (actual rows=0 loops=1) -> Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1)
-- simple test to confirm we can fetch long (>4KB) plans -- simple test to confirm we can fetch long (>4KB) plans
EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM users_table_2 WHERE value_1::text = '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X'; EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM users_table_2 WHERE value_1::text = '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X';
Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
@ -2969,7 +2969,7 @@ Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-> Task -> Task
Tuple data received from node: 0 bytes Tuple data received from node: 0 bytes
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on users_table_2_570028 users_table_xxx (actual rows=0 loops=1) -> Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1)
Filter: ((value_1)::text = '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X'::text) Filter: ((value_1)::text = '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X'::text)
-- sorted explain analyze output -- sorted explain analyze output
CREATE TABLE explain_analyze_execution_time (a int); CREATE TABLE explain_analyze_execution_time (a int);
@ -3030,7 +3030,7 @@ WindowAgg (actual rows=1 loops=1)
Tasks Shown: One of 2 Tasks Shown: One of 2
-> Task -> Task
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_xxx (actual rows=1 loops=1) -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
CREATE TABLE distributed_table_2(a int, b int); CREATE TABLE distributed_table_2(a int, b int);
SELECT create_distributed_table('distributed_table_2','a'); SELECT create_distributed_table('distributed_table_2','a');
@ -3051,7 +3051,7 @@ Limit (actual rows=1 loops=1)
Tasks Shown: One of 2 Tasks Shown: One of 2
-> Task -> Task
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_xxx (actual rows=1 loops=1) -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
Task Count: 2 Task Count: 2
Tuple data received from nodes: 16 bytes Tuple data received from nodes: 16 bytes
Tasks Shown: One of 2 Tasks Shown: One of 2
@ -3062,7 +3062,7 @@ Limit (actual rows=1 loops=1)
-> Nested Loop (actual rows=1 loops=1) -> Nested Loop (actual rows=1 loops=1)
Join Filter: (distributed_table_2.b = intermediate_result.r) Join Filter: (distributed_table_2.b = intermediate_result.r)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1) -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
-> Seq Scan on distributed_table_2_570034 distributed_table_xxx (actual rows=1 loops=1) -> Seq Scan on distributed_table_2_570034 distributed_table_2 (actual rows=1 loops=1)
EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subquery; EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subquery;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Task Count: 1 Task Count: 1
@ -3077,7 +3077,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Tasks Shown: One of 2 Tasks Shown: One of 2
-> Task -> Task
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Seq Scan on distributed_table_1_570032 distributed_table_xxx (actual rows=1 loops=1) -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
CREATE TYPE multi_explain.int_wrapper_type AS (int_field int); CREATE TYPE multi_explain.int_wrapper_type AS (int_field int);
CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type); CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type);
SELECT create_distributed_table('tbl', 'a'); SELECT create_distributed_table('tbl', 'a');

View File

@ -5,16 +5,8 @@
-- --
-- It'd be nice to script generation of this file, but alas, that's -- It'd be nice to script generation of this file, but alas, that's
-- not done yet. -- not done yet.
-- differentiate the output file for pg11 and versions above, with regards to objects --
-- created per citus version depending on the postgres version. Upgrade tests verify the -- Upgrade tests verify the objects are added in citus_finish_pg_upgrade()
-- objects are added in citus_finish_pg_upgrade()
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
version_above_eleven
---------------------------------------------------------------------
t
(1 row)
SET citus.next_shard_id TO 580000; SET citus.next_shard_id TO 580000;
CREATE SCHEMA multi_extension; CREATE SCHEMA multi_extension;
SELECT $definition$ SELECT $definition$
@ -952,7 +944,7 @@ DELETE FROM pg_dist_shard WHERE shardid = 1;
CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id); CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id);
CREATE TABLE orders_2020_07_01 CREATE TABLE orders_2020_07_01
PARTITION OF e_transactions FOR VALUES IN (1,2,3); PARTITION OF e_transactions FOR VALUES IN (1,2,3);
INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}', 7, 's'); INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
SELECT SELECT
(metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11, (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,
(metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null

View File

@@ -303,7 +303,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id', 'hash');
 INSERT INTO referenced_table VALUES(1, 1);
 INSERT INTO referencing_table VALUES(1, 1);
 UPDATE referenced_table SET test_column = 10 WHERE id = 1;
-ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350385" on table "referencing_table_xxxxxxx"
+ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_id_fkey_1350385" on table "referencing_table_xxxxxxx"
 DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx".
 CONTEXT: while executing command on localhost:xxxxx
 BEGIN;
@@ -343,7 +343,7 @@ INSERT INTO referenced_table VALUES(1, 1);
 INSERT INTO referencing_table VALUES(1, 1);
 BEGIN;
 UPDATE referenced_table SET test_column = 20 WHERE id = 1;
-ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350449" on table "referencing_table_xxxxxxx"
+ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_id_fkey_1350449" on table "referencing_table_xxxxxxx"
 DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx".
 CONTEXT: while executing command on localhost:xxxxx
 UPDATE referencing_table SET id = 20 WHERE ref_id = 1;
@@ -404,7 +404,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id', 'hash');
 (1 row)
 INSERT INTO referencing_table VALUES(null, 2);
-ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350600"
+ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_id_fkey_1350600"
 DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
 CONTEXT: while executing command on localhost:xxxxx
 SELECT * FROM referencing_table;
@@ -787,7 +787,7 @@ SELECT create_distributed_table('self_referencing_table1', 'id', 'hash');
 INSERT INTO self_referencing_table1 VALUES(1, 1, 1);
 -- we expect this query to fail
 INSERT INTO self_referencing_table1 VALUES(1, 2, 3);
-ERROR: insert or update on table "self_referencing_table1_1350640" violates foreign key constraint "self_referencing_table1_id_fkey_1350640"
+ERROR: insert or update on table "self_referencing_table1_1350640" violates foreign key constraint "self_referencing_table1_id_other_column_ref_fkey_1350640"
 DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table1_1350640".
 CONTEXT: while executing command on localhost:xxxxx
 -- verify that rows are actually inserted

@@ -463,7 +463,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
 ---------------------------------------------------------------------
-mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 2 | s | f
+mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
 (1 row)
 SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
@@ -562,7 +562,7 @@ SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
 SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass;
 Constraint | Definition
 ---------------------------------------------------------------------
-fk_test_2_col1_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
+fk_test_2_col1_col2_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
 (1 row)
 \c - - - :master_port
@@ -602,7 +602,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
 ---------------------------------------------------------------------
-mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 2 | s | f
+mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
 (1 row)
 SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
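The differences above are output-format changes rather than behavior changes: PostgreSQL 13 renamed the Var node fields varnoold/varoattno to varnosyn/varattnosyn, which alters the node-tree text stored in pg_dist_partition.partkey, and newer servers name multi-column foreign keys after all key columns (fk_test_2_col1_col2_fkey). As a minimal sketch for reading partkey without matching on the node-tree text, assuming the Citus helper column_to_column_name(regclass, text) is available:

SELECT logicalrelid,
       column_to_column_name(logicalrelid, partkey) AS distribution_column
FROM pg_dist_partition
WHERE logicalrelid::text LIKE 'mx_testing_schema%';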

@@ -451,7 +451,7 @@ SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1;
 -- create default partition
 CREATE TABLE partitioning_test_default PARTITION OF partitioning_test DEFAULT;
 \d+ partitioning_test
-Table "public.partitioning_test"
+Partitioned table "public.partitioning_test"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
 id | integer | | | | plain | |

@@ -119,7 +119,7 @@ SELECT generate_alter_table_attach_partition_command('date_partition_2007');
 -- detach and attach the partition by the command generated by us
 \d+ date_partitioned_table
-Table "public.date_partitioned_table"
+Partitioned table "public.date_partitioned_table"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
 id | integer | | | | plain | |
@@ -136,7 +136,7 @@ SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_tabl
 -- check that both partitions are visiable
 \d+ date_partitioned_table
-Table "public.date_partitioned_table"
+Partitioned table "public.date_partitioned_table"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
 id | integer | | | | plain | |
@@ -160,7 +160,7 @@ SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_
 -- the hierarcy is successfully created
 \d+ date_partitioned_table_100
-Table "public.date_partitioned_table_100"
+Partitioned table "public.date_partitioned_table_100"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
 id | integer | | | | plain | |
@@ -187,7 +187,7 @@ SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_
 -- the hierarcy is successfully broken
 \d+ date_partitioned_table_100
-Table "public.date_partitioned_table_100"
+Partitioned table "public.date_partitioned_table_100"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
 id | integer | | | | plain | |
@@ -243,7 +243,7 @@ SELECT public.generate_alter_table_attach_partition_command('child_2');
 SET search_path = 'partition_parent_schema';
 -- detach and attach the partition by the command generated by us
 \d+ parent_table
-Table "partition_parent_schema.parent_table"
+Partitioned table "partition_parent_schema.parent_table"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
 id | integer | | not null | | plain | |
@@ -260,7 +260,7 @@ SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'p
 -- check that both partitions are visiable
 \d+ parent_table
-Table "partition_parent_schema.parent_table"
+Partitioned table "partition_parent_schema.parent_table"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
 ---------------------------------------------------------------------
 id | integer | | not null | | plain | |
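These header changes come from psql, which labels partitioned relations explicitly on all supported server versions; the alternative output files that carried the plain header are gone. For example:

\d+ partitioning_test
-- first header line on a supported server:
-- Partitioned table "public.partitioning_test"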

@@ -86,7 +86,7 @@ SELECT prune_using_both_values('pruning', 'tomato', 'rose');
 SELECT debug_equality_expression('pruning');
 debug_equality_expression
 ---------------------------------------------------------------------
-{OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1}
+{OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1}
 (1 row)
 -- print the initial ordering of shard intervals

@@ -182,7 +182,7 @@ ERROR: cannot isolate tenant because "lineitem_streaming" has colocated tables
 HINT: Use CASCADE option to isolate tenants for the colocated tables too. Example usage: isolate_tenant_to_new_shard('lineitem_streaming', '100', 'CASCADE')
 -- check with an input not castable to bigint
 SELECT isolate_tenant_to_new_shard('lineitem_streaming', 'abc', 'CASCADE');
-ERROR: invalid input syntax for integer: "abc"
+ERROR: invalid input syntax for type bigint: "abc"
 SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE');
 isolate_tenant_to_new_shard
 ---------------------------------------------------------------------
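The expected message changes because PostgreSQL 13 includes the type name in input-syntax errors. The new wording is easy to confirm on any supported server:

SELECT 'abc'::bigint;
-- ERROR:  invalid input syntax for type bigint: "abc"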

@@ -499,9 +499,10 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
 (1 row)
 ALTER TABLE t1 ADD CONSTRAINT t1_a_check CHECK(a > 2) NOT VALID;
--- will error out with "ERROR: CHECK CONSTRAINT "t1_a_check" is violated by some row"
+-- will error out with
+-- "ERROR: CHECK CONSTRAINT "t1_a_check" of relation "t1" is violated by some row"
 ALTER TABLE t1 VALIDATE CONSTRAINT t1_a_check;
-ERROR: check constraint "t1_a_check" is violated by some row
+ERROR: check constraint "t1_a_check" of relation "t1" is violated by some row
 -- remove violating row
 DELETE FROM t1 where a = 1;
 -- verify no rows in t1
@@ -512,7 +513,7 @@ SELECT * FROM t1;
 -- this will still error out
 ALTER TABLE t1 VALIDATE CONSTRAINT t1_a_check;
-ERROR: check constraint "t1_a_check" is violated by some row
+ERROR: check constraint "t1_a_check" of relation "t1" is violated by some row
 -- The check will pass when the local copies are truncated
 SELECT truncate_local_data_after_distributing_table('t1');
 truncate_local_data_after_distributing_table
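PostgreSQL 13 likewise extended this error with the relation name. A self-contained sketch of the scenario above, reusing the names from the test:

CREATE TABLE t1 (a int);
INSERT INTO t1 VALUES (1); -- row that violates the constraint below
ALTER TABLE t1 ADD CONSTRAINT t1_a_check CHECK (a > 2) NOT VALID;
ALTER TABLE t1 VALIDATE CONSTRAINT t1_a_check;
-- ERROR:  check constraint "t1_a_check" of relation "t1" is violated by some row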

@@ -86,15 +86,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
 -> Hash Join
-Hash Cond: ((partitioning_hash_join_test_xxx.id = partitioning_hash_test_xxx.id) AND (partitioning_hash_join_test_xxx.subid = partitioning_hash_test_xxx.subid))
+Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid))
 -> Append
--> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+-> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+-> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+-> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
 -> Hash
 -> Append
--> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
--> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+-> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+-> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 (15 rows)
 -- set partition-wise join on and parallel to off
@@ -124,15 +124,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
 -> Hash Join
-Hash Cond: ((partitioning_hash_test_xxx.id = partitioning_hash_join_test_xxx.id) AND (partitioning_hash_test_xxx.subid = partitioning_hash_join_test_xxx.subid))
+Hash Cond: ((partitioning_hash_test.id = partitioning_hash_join_test.id) AND (partitioning_hash_test.subid = partitioning_hash_join_test.subid))
 -> Append
--> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
--> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+-> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+-> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 -> Hash
 -> Append
--> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+-> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+-> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+-> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
 (15 rows)
 -- note that partition-wise joins only work when partition key is in the join
@@ -148,15 +148,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id)
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
 -> Hash Join
-Hash Cond: (partitioning_hash_test_xxx.id = partitioning_hash_join_test_xxx.id)
+Hash Cond: (partitioning_hash_test.id = partitioning_hash_join_test.id)
 -> Append
--> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
--> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+-> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+-> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 -> Hash
 -> Append
--> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+-> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+-> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+-> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
 (15 rows)
 -- reset partition-wise join

@@ -86,15 +86,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
 -> Hash Join
-Hash Cond: ((partitioning_hash_join_test_xxx.id = partitioning_hash_test_xxx.id) AND (partitioning_hash_join_test_xxx.subid = partitioning_hash_test_xxx.subid))
+Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid))
 -> Append
--> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+-> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+-> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+-> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
 -> Hash
 -> Append
--> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
--> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+-> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+-> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 (15 rows)
 -- set partition-wise join on and parallel to off
@@ -124,15 +124,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
 -> Hash Join
-Hash Cond: ((partitioning_hash_join_test_xxx.id = partitioning_hash_test_xxx.id) AND (partitioning_hash_join_test_xxx.subid = partitioning_hash_test_xxx.subid))
+Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid))
 -> Append
--> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+-> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+-> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+-> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
 -> Hash
 -> Append
--> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
--> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+-> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+-> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 (15 rows)
 -- note that partition-wise joins only work when partition key is in the join
@@ -148,15 +148,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id)
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
 -> Hash Join
-Hash Cond: (partitioning_hash_join_test_xxx.id = partitioning_hash_test_xxx.id)
+Hash Cond: (partitioning_hash_join_test.id = partitioning_hash_test.id)
 -> Append
--> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
--> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+-> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+-> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+-> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
 -> Hash
 -> Append
--> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
--> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+-> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+-> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 (15 rows)
 -- reset partition-wise join

@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
-\gset
-\if :server_version_above_eleven
-\else
-\q
-\endif
 SET citus.shard_replication_factor to 1;
 SET citus.next_shard_id TO 60000;
 SET citus.next_placement_id TO 60000;
@@ -667,4 +660,4 @@ DROP USER read_access;
 drop schema test_pg12 cascade;
 NOTICE: drop cascades to 16 other objects
 \set VERBOSITY default
 SET citus.shard_replication_factor to 2;
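The deleted lines are the psql version guard used throughout these expected-output files; with PostgreSQL 13 as the minimum supported version the check always passes, so the guard is dropped wholesale. The pattern, as it appeared here:

SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
\gset
\if :server_version_above_eleven
\else
-- on an unsupported server, quit so the rest of the file never runs
\q
\endif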

@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
-\gset
-\if :server_version_above_twelve
-\else
-\q
-\endif
 create schema test_pg13;
 set search_path to test_pg13;
 SET citus.shard_replication_factor to 1;

@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
\gset
\if :server_version_above_twelve
\else
\q

@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
-\gset
-\if :server_version_above_twelve
-\else
-\q
-\endif
 CREATE SCHEMA "statistics'TestTarget";
 SET search_path TO "statistics'TestTarget";
 SET citus.next_shard_id TO 980000;

@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
\gset
\if :server_version_above_twelve
\else
\q

@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
-\gset
-\if :server_version_above_twelve
-\else
-\q
-\endif
 CREATE TABLE with_ties_table (a INT, b INT);
 SELECT create_distributed_table('with_ties_table', 'a');
 create_distributed_table

@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
\gset
\if :server_version_above_twelve
\else
\q

@@ -1,6 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 12 AS have_table_am
-\gset
 CREATE TEMPORARY TABLE output (line text);
 CREATE SCHEMA dumper;
 SET search_path TO 'dumper';
@@ -29,17 +26,9 @@ COPY data TO STDOUT;
 4 {}
 2 {$":9}
 2 {$":9}
-\if :have_table_am
 CREATE TABLE simple_columnar(i INT, t TEXT) USING columnar;
-\else
-CREATE TABLE simple_columnar(i INT, t TEXT);
-\endif
 INSERT INTO simple_columnar VALUES (1, 'one'), (2, 'two');
-\if :have_table_am
 CREATE TABLE dist_columnar(i INT, t TEXT) USING columnar;
-\else
-CREATE TABLE dist_columnar(i INT, t TEXT);
-\endif
 SELECT create_distributed_table('dist_columnar', 'i');
 create_distributed_table
 ---------------------------------------------------------------------
@@ -123,13 +112,8 @@ COPY dumper."weird.table" ("data.jsonb", "?empty(") TO STDOUT WITH (format csv,
 data.jsonb,?empty(
 "{""weird"": {""table"": ""{:""}}",""
 "{""?\"""": []}",""
--- If server supports table access methods, check to be sure that the
--- recreated table is still columnar. Otherwise, just return true.
-\if :have_table_am
+-- Check to be sure that the recreated table is still columnar.
 \set is_columnar '(SELECT amname=''columnar'' from pg_am where relam=oid)'
-\else
-\set is_columnar TRUE
-\endif
 SELECT :is_columnar AS check_columnar FROM pg_class WHERE oid='simple_columnar'::regclass;
 check_columnar
 ---------------------------------------------------------------------
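With table access methods available on every supported server, the \if :have_table_am branches collapse to the USING columnar form, and the columnar check keeps only the pg_am subquery. The psql variable is interpolated textually, so the stored subquery runs inside the outer query:

\set is_columnar '(SELECT amname=''columnar'' from pg_am where relam=oid)'
SELECT :is_columnar AS check_columnar
FROM pg_class
WHERE oid = 'simple_columnar'::regclass;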

@@ -1,11 +1,3 @@
--- print whether we're using version > 12 to make version-specific tests clear
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS version_above_twelve;
- version_above_twelve
----------------------------------------------------------------------
- t
-(1 row)
 CREATE SCHEMA "extension'test";
 -- use a schema name with escape character
 SET search_path TO "extension'test";

@ -1,643 +0,0 @@
-- print whether we're using version > 12 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS version_above_twelve;
version_above_twelve
---------------------------------------------------------------------
f
(1 row)
CREATE SCHEMA "extension'test";
-- use a schema name with escape character
SET search_path TO "extension'test";
SET client_min_messages TO WARNING;
-- create an extension on the given search_path
-- the extension is on contrib, so should be avaliable for the regression tests
CREATE EXTENSION seg;
-- make sure that both the schema and the extension is distributed
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
count
---------------------------------------------------------------------
1
(1 row)
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test');
count
---------------------------------------------------------------------
1
(1 row)
CREATE TABLE test_table (key int, value seg);
SELECT create_distributed_table('test_table', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- make sure that the table is also distributed now
SELECT count(*) from pg_dist_partition where logicalrelid='extension''test.test_table'::regclass;
count
---------------------------------------------------------------------
1
(1 row)
CREATE TYPE two_segs AS (seg_1 seg, seg_2 seg);
-- verify that the type that depends on the extension is also marked as distributed
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_type WHERE typname = 'two_segs' AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test'));
count
---------------------------------------------------------------------
1
(1 row)
-- now try to run CREATE EXTENSION within a transction block, all should work fine
BEGIN;
CREATE EXTENSION isn WITH SCHEMA public;
-- now, try create a reference table relying on the data types
-- this should not succeed as we do not distribute extension commands within transaction blocks
CREATE TABLE dist_table (key int, value public.issn);
SELECT create_distributed_table('dist_table', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- we can even run queries (sequentially) over the distributed table
SELECT * FROM dist_table;
key | value
---------------------------------------------------------------------
(0 rows)
INSERT INTO dist_table VALUES (1, public.issn('1436-4522'));
INSERT INTO dist_table SELECT * FROM dist_table RETURNING *;
key | value
---------------------------------------------------------------------
1 | 1436-4522
(1 row)
COMMIT;
-- make sure that the extension is distributed even if we run create extension in a transaction block
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn');
count
---------------------------------------------------------------------
1
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
CREATE TABLE ref_table (a public.issn);
-- now, create a reference table relying on the data types
SELECT create_reference_table('ref_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- now, drop the extension, recreate it with an older version and update it to latest version
DROP EXTENSION isn CASCADE;
CREATE EXTENSION isn WITH VERSION "1.1";
-- before updating the version, ensure the current version
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.1)
(localhost,57638,t,1.1)
(2 rows)
-- now, update to a newer version
ALTER EXTENSION isn UPDATE TO '1.2';
-- show that ALTER EXTENSION is propagated
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.2)
(localhost,57638,t,1.2)
(2 rows)
-- before changing the schema, ensure the current schmea
SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,extension'test)
(localhost,57638,t,extension'test)
(2 rows)
-- now change the schema
ALTER EXTENSION isn SET SCHEMA public;
-- switch back to public schema as we set extension's schema to public
SET search_path TO public;
-- make sure that the extension is distributed
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn');
count
---------------------------------------------------------------------
1
(1 row)
-- show that the ALTER EXTENSION command is propagated
SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,public)
(localhost,57638,t,public)
(2 rows)
-- drop the extension finally
DROP EXTENSION isn CASCADE;
-- now make sure that the reference tables depending on an extension can be succesfully created.
-- we should also ensure that we replicate this reference table (and hence the extension)
-- to new nodes after calling master_activate_node.
-- now, first drop seg and existing objects before next test
DROP EXTENSION seg CASCADE;
-- but as we have only 2 ports in postgresql tests, let's remove one of the nodes first
-- before remove, first remove the existing relations (due to the other tests)
DROP SCHEMA "extension'test" CASCADE;
SELECT 1 from master_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- then create the extension
CREATE EXTENSION seg;
-- show that the extension is created on existing worker
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(1 row)
SELECT workers.result = pg_extension.extversion AS same_version
FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
same_version
---------------------------------------------------------------------
t
(1 row)
-- now create the reference table
CREATE TABLE ref_table_2 (x seg);
SELECT create_reference_table('ref_table_2');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- we also add an old style extension from before extensions which we upgrade to an extension
-- by exercising it before the add node we verify it will create the extension (without upgrading)
-- it on the new worker as well. For this we use the dict_int extension which is in contrib,
-- supports FROM unpackaged, and is relatively small
-- create objects for dict_int manually so we can upgrade from unpacked
CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init );
SELECT run_command_on_workers($$
CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init );
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE TEXT SEARCH TEMPLATE")
(1 row)
CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template);
COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers';
CREATE EXTENSION dict_int FROM unpackaged;
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(1 row)
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.0)
(1 row)
-- adding the second node will fail as the text search template needs to be created manually
SELECT 1 from master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- create the text search template manually on the worker
\c - - - :worker_2_port
SET citus.enable_metadata_sync TO false;
CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
ERROR: function "dintdict_init" already exists with same argument types
CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
ERROR: function "dintdict_lexize" already exists with same argument types
CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init );
ERROR: duplicate key value violates unique constraint "pg_ts_template_tmplname_index"
DETAIL: Key (tmplname, tmplnamespace)=(intdict_template, 2200) already exists.
RESET citus.enable_metadata_sync;
\c - - - :master_port
SET client_min_messages TO WARNING;
-- add the second node now
SELECT 1 from master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- show that the extension is created on both existing and new node
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
SELECT workers.result = pg_extension.extversion AS same_version
FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
same_version
---------------------------------------------------------------------
t
t
(2 rows)
-- check for the unpackaged extension to be created correctly
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.0)
(localhost,57638,t,1.0)
(2 rows)
-- and similarly check for the reference table
select count(*) from pg_dist_partition where partmethod='n' and logicalrelid='ref_table_2'::regclass;
count
---------------------------------------------------------------------
1
(1 row)
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='ref_table_2'::regclass;
count
---------------------------------------------------------------------
1
(1 row)
DROP TABLE ref_table_2;
-- now test create extension in another transaction block but rollback this time
BEGIN;
CREATE EXTENSION isn WITH VERSION '1.1' SCHEMA public;
ROLLBACK;
-- at the end of the transaction block, we did not create isn extension in coordinator or worker nodes as we rollback'ed
-- make sure that the extension is not distributed
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn');
count
---------------------------------------------------------------------
0
(1 row)
-- and the extension does not exist on workers
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
-- give a notice for the following commands saying that it is not
-- propagated to the workers. the user should run it manually on the workers
CREATE TABLE t1 (A int);
CREATE VIEW v1 AS select * from t1;
ALTER EXTENSION seg ADD VIEW v1;
ALTER EXTENSION seg DROP VIEW v1;
DROP VIEW v1;
DROP TABLE t1;
-- drop multiple extensions at the same time
CREATE EXTENSION isn WITH VERSION '1.1' SCHEMA public;
-- let's create another extension locally
set citus.enable_ddl_propagation to 'off';
CREATE EXTENSION pg_buffercache;
set citus.enable_ddl_propagation to 'on';
DROP EXTENSION pg_buffercache, isn CASCADE;
SELECT count(*) FROM pg_extension WHERE extname IN ('pg_buffercache', 'isn');
count
---------------------------------------------------------------------
0
(1 row)
-- drop extension should just work
DROP EXTENSION seg CASCADE;
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
count
---------------------------------------------------------------------
0
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'seg'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
-- make sure that the extension is not avaliable anymore as a distributed object
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn'));
count
---------------------------------------------------------------------
0
(1 row)
CREATE SCHEMA "extension'test";
SET search_path TO "extension'test";
-- check restriction for sequential execution
-- enable it and see that create command errors but continues its execution by changing citus.multi_shard_modify_mode TO 'off
BEGIN;
SET LOCAL citus.create_object_propagation TO deferred;
CREATE TABLE some_random_table (a int);
SELECT create_distributed_table('some_random_table', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE EXTENSION seg;
CREATE TABLE some_random_table_2 (a int, b seg);
SELECT create_distributed_table('some_random_table_2', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
ROLLBACK;
-- show that the CREATE EXTENSION command propagated even if the transaction
-- block is rollbacked, that's a shortcoming of dependency creation logic
SELECT COUNT(DISTINCT workers.result)
FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers;
count
---------------------------------------------------------------------
1
(1 row)
-- drop the schema and all the objects
DROP SCHEMA "extension'test" CASCADE;
-- recreate for the next tests
CREATE SCHEMA "extension'test";
-- use a schema name with escape character
SET search_path TO "extension'test";
-- remove the node, we'll add back again
SELECT 1 from master_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- Test extension function incorrect distribution argument
CREATE TABLE test_extension_function(col varchar);
CREATE EXTENSION seg;
-- Missing distribution argument
SELECT create_distributed_function('seg_in(cstring)');
ERROR: Extension functions(seg_in) without distribution argument are not supported.
-- Missing colocation argument
SELECT create_distributed_function('seg_in(cstring)', '$1');
ERROR: cannot distribute the function "seg_in" since there is no table to colocate with
HINT: Provide a distributed table via "colocate_with" option to create_distributed_function()
-- Incorrect distribution argument
SELECT create_distributed_function('seg_in(cstring)', '$2', colocate_with:='test_extension_function');
ERROR: cannot distribute the function "seg_in" since the distribution argument is not valid
HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function()
-- Colocated table is not distributed
SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function');
ERROR: relation test_extension_function is not distributed
DROP EXTENSION seg;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('test_extension_function', 'col', colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- now, create a type that depends on another type, which
-- finally depends on an extension
BEGIN;
CREATE EXTENSION seg;
CREATE EXTENSION isn;
CREATE TYPE test_type AS (a int, b seg);
CREATE TYPE test_type_2 AS (a int, b test_type);
CREATE TABLE t2 (a int, b test_type_2, c issn);
SELECT create_distributed_table('t2', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TYPE test_type_3 AS (a int, b test_type, c issn);
CREATE TABLE t3 (a int, b test_type_3);
SELECT create_reference_table('t3');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- Distribute an extension-function
SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function');
create_distributed_function
---------------------------------------------------------------------
(1 row)
COMMIT;
-- Check the pg_dist_object
SELECT pg_proc.proname as DistributedFunction
FROM pg_catalog.pg_dist_object, pg_proc
WHERE pg_proc.proname = 'seg_in' and
pg_proc.oid = pg_catalog.pg_dist_object.objid and
classid = 'pg_proc'::regclass;
distributedfunction
---------------------------------------------------------------------
seg_in
(1 row)
SELECT run_command_on_workers($$
SELECT count(*)
FROM pg_catalog.pg_dist_object, pg_proc
WHERE pg_proc.proname = 'seg_in' and
pg_proc.oid = pg_catalog.pg_dist_object.objid and
classid = 'pg_proc'::regclass;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(1 row)
-- add the node back
SELECT 1 from master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- make sure that both extensions are created on both nodes
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid IN (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn'));
count
---------------------------------------------------------------------
2
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,2)
(localhost,57638,t,2)
(2 rows)
-- Check the pg_dist_object on the both nodes
SELECT run_command_on_workers($$
SELECT count(*)
FROM pg_catalog.pg_dist_object, pg_proc
WHERE pg_proc.proname = 'seg_in' and
pg_proc.oid = pg_catalog.pg_dist_object.objid and
classid = 'pg_proc'::regclass;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
DROP EXTENSION seg CASCADE;
-- Recheck the pg_dist_object
SELECT pg_proc.proname as DistributedFunction
FROM pg_catalog.pg_dist_object, pg_proc
WHERE pg_proc.proname = 'seg_in' and
pg_proc.oid = pg_catalog.pg_dist_object.objid and
classid = 'pg_proc'::regclass;
distributedfunction
---------------------------------------------------------------------
(0 rows)
SELECT run_command_on_workers($$
SELECT count(*)
FROM pg_catalog.pg_dist_object, pg_proc
WHERE pg_proc.proname = 'seg_in' and
pg_proc.oid = pg_catalog.pg_dist_object.objid and
classid = 'pg_proc'::regclass;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
-- Distribute an extension-function where extension is not in pg_dist_object
SET citus.enable_ddl_propagation TO false;
CREATE EXTENSION seg;
SET citus.enable_ddl_propagation TO true;
-- Check the extension in pg_dist_object
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND
objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
count
---------------------------------------------------------------------
0
(1 row)
SELECT run_command_on_workers($$
SELECT count(*)
FROM pg_catalog.pg_dist_object, pg_proc
WHERE pg_proc.proname = 'seg_in' and
pg_proc.oid = pg_catalog.pg_dist_object.objid and
classid = 'pg_proc'::regclass;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function');
create_distributed_function
---------------------------------------------------------------------
(1 row)
-- Recheck the extension in pg_dist_object
SELECT count(*) FROM pg_catalog.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND
objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
count
---------------------------------------------------------------------
1
(1 row)
SELECT pg_proc.proname as DistributedFunction
FROM pg_catalog.pg_dist_object, pg_proc
WHERE pg_proc.proname = 'seg_in' and
pg_proc.oid = pg_catalog.pg_dist_object.objid and
classid = 'pg_proc'::regclass;
distributedfunction
---------------------------------------------------------------------
seg_in
(1 row)
SELECT run_command_on_workers($$
SELECT count(*)
FROM pg_catalog.pg_dist_object, pg_proc
WHERE pg_proc.proname = 'seg_in' and
pg_proc.oid = pg_catalog.pg_dist_object.objid and
classid = 'pg_proc'::regclass;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
DROP EXTENSION seg;
DROP TABLE test_extension_function;
-- Test extension function altering distribution argument
BEGIN;
SET citus.shard_replication_factor = 1;
SET citus.multi_shard_modify_mode TO sequential;
CREATE TABLE test_extension_function(col1 float8[], col2 float8[]);
SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE EXTENSION cube;
SELECT create_distributed_function('cube(float8[], float8[])', '$1', 'test_extension_function');
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT distribution_argument_index FROM pg_catalog.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND
objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8');
distribution_argument_index
---------------------------------------------------------------------
0
(1 row)
SELECT create_distributed_function('cube(float8[], float8[])', '$2', 'test_extension_function');
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT distribution_argument_index FROM pg_catalog.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND
objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8');
distribution_argument_index
---------------------------------------------------------------------
1
(1 row)
ROLLBACK;
-- Postgres already doesn't allow creating extensions in temp schema but
-- let's have a test for that to track any furher changes in postgres.
DROP EXTENSION isn CASCADE;
CREATE EXTENSION isn WITH SCHEMA pg_temp;
ERROR: schema "pg_temp" does not exist
-- drop the schema and all the objects
DROP SCHEMA "extension'test" CASCADE;

@@ -4,9 +4,6 @@ CREATE SCHEMA sqlsmith_failures;
 SET search_path TO sqlsmith_failures, public;
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 1280000;
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
-\gset
 begin;
 SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
 create table countries(
@@ -36,7 +33,6 @@ select create_distributed_table('orgs', 'id');
 (1 row)
-\if :server_version_above_eleven
 -- pg12 and above support generated columns
 create table users (
 id bigserial
@@ -47,18 +43,6 @@ create table users (
 , score bigint generated always as (id + country_id) stored
 , primary key (org_id, id)
 );
-\else
--- pg11 and below don't have generated columns, use a normal column
-create table users (
-id bigserial
-, org_id bigint references orgs(id)
-, name text
-, created_at timestamptz default now()
-, country_id int -- references countries(id)
-, score bigint
-, primary key (org_id, id)
-);
-\endif
 select create_distributed_table('users', 'org_id');
 create_distributed_table
 ---------------------------------------------------------------------
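Generated columns exist on every supported server (PostgreSQL 12 added them), so the \else fallback with a plain score column disappears and users is always created with the stored generated column. Reassembled from the hunk above:

create table users (
    id bigserial
  , org_id bigint references orgs(id)
  , name text
  , created_at timestamptz default now()
  , country_id int -- references countries(id)
  , score bigint generated always as (id + country_id) stored
  , primary key (org_id, id)
);
select create_distributed_table('users', 'org_id');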

@@ -157,12 +157,12 @@ SELECT * FROM test_matview;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'events%' ORDER BY logicalrelid::text;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
 ---------------------------------------------------------------------
-events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
-events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
-events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
-events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390013 | c | f
-events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390013 | c | f
-events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390013 | c | f
+events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390012 | s | f
+events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390012 | s | f
+events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390012 | s | f
+events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390013 | c | f
+events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390013 | c | f
+events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390013 | c | f
 (6 rows)
 SELECT count(*) > 0 FROM pg_dist_node;

@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
-\gset
-\if :server_version_above_eleven
-\else
-\q
-\endif
 SET citus.shard_replication_factor to 1;
 SET citus.next_shard_id TO 60000;
 SET citus.next_placement_id TO 60000;

@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
-\gset
-\if :server_version_above_eleven
-\else
-\q
-\endif
 SET search_path TO upgrade_columnar, public;
 -- test we retained data
 SELECT * FROM test_retains_data ORDER BY a;

@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_eleven
\gset
\if :server_version_above_eleven
\else
\q

@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
-\gset
-\if :server_version_above_eleven
-\else
-\q
-\endif
 -- Test if relying on topological sort of the objects, not their names, works
 -- fine when re-creating objects during pg_upgrade.
 ALTER SCHEMA public RENAME TO citus_schema;

@ -1,11 +1,3 @@
-- print version above 11 (eg. 12 and above)
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
version_above_eleven
---------------------------------------------------------------------
t
(1 row)
-- list all postgres objects belonging to the citus extension
SELECT pg_catalog.pg_describe_object(classid, objid, 0) AS description
FROM pg_catalog.pg_depend, pg_catalog.pg_extension e
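The hunk cuts this query off after its FROM clause. A typical complete shape for such a catalog query, where the WHERE clause below is an assumption rather than something shown in this diff, joins pg_depend's extension-dependency rows to the citus row in pg_extension:

-- sketch: only the first two lines appear in the diff; the rest is assumed
SELECT pg_catalog.pg_describe_object(classid, objid, 0) AS description
FROM pg_catalog.pg_depend, pg_catalog.pg_extension e
WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass
  AND refobjid = e.oid
  AND deptype = 'e'
  AND e.extname = 'citus'
ORDER BY 1;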

View File

@ -24,7 +24,7 @@ SELECT create_distributed_table('customer_copy_hash', 'c_custkey', shard_count:=
COPY customer_copy_hash FROM STDIN;
-- Test syntax error
COPY customer_copy_hash (c_custkey,c_name) FROM STDIN;
-ERROR: invalid input syntax for integer: "1,customer1"
+ERROR: invalid input syntax for type integer: "1,customer1"
CONTEXT: COPY customer_copy_hash, line 1, column c_custkey: "1,customer1"
-- Test invalid option
COPY customer_copy_hash (c_custkey,c_name) FROM STDIN (append_to_shard xxxxx);
@ -267,7 +267,7 @@ SET citus.shard_replication_factor TO 2;
BEGIN;
SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
-ERROR: invalid input syntax for integer: "notinteger"
+ERROR: invalid input syntax for type integer: "notinteger"
CONTEXT: COPY customer_copy_append, line 3, column c_custkey: "notinteger"
END;
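Both hunks above track the same PostgreSQL 13 change: input-conversion errors now say "invalid input syntax for type integer" rather than "... for integer". A standalone reproduction on 13 or later, as a sketch:

-- sketch: reproduces the PG 13+ wording outside of COPY
SELECT 'notinteger'::int;
-- ERROR:  invalid input syntax for type integer: "notinteger"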
-- Test that no shard is created for failing copy
@ -972,7 +972,7 @@ SELECT * FROM copy_jsonb ORDER BY key;
-- JSONB parsing error without validation: no line number
\COPY copy_jsonb (key, value) FROM STDIN
-ERROR: invalid input syntax for json
+ERROR: invalid input syntax for type json
DETAIL: The input string ended unexpectedly.
TRUNCATE copy_jsonb;
-- JSONB when there is a complex column should work. Complex columns force
@ -1019,7 +1019,7 @@ SELECT * FROM copy_jsonb ORDER BY key;
-- JSONB parsing error with validation: should see line number
\COPY copy_jsonb (key, value) FROM STDIN
-ERROR: invalid input syntax for json
+ERROR: invalid input syntax for type json
DETAIL: The input string ended unexpectedly.
CONTEXT: JSON data, line 1: {"r":255,"g":0,"b":0
COPY copy_jsonb, line 1, column value: "{"r":255,"g":0,"b":0"
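The json message gained the word "type" in the same PostgreSQL 13 change. A standalone sketch that triggers the same error and detail lines on 13 or later:

-- sketch: truncated JSON hits the same error path as the COPY above
SELECT '{"r":255,"g":0,"b":0'::jsonb;
-- ERROR:  invalid input syntax for type json
-- DETAIL:  The input string ended unexpectedly.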

View File

@ -1,7 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven;
\gset
CREATE SCHEMA alter_distributed_table;
SET search_path TO alter_distributed_table;
SET citus.shard_count TO 4;
@ -137,14 +133,12 @@ SELECT alter_distributed_table('col_with_ref_to_ref', shard_count:=10, cascade_t
SELECT alter_distributed_table('col_with_ref_to_dist', shard_count:=6, cascade_to_colocated:=true);
\if :server_version_above_eleven
-- test altering columnar table
CREATE TABLE columnar_table (a INT) USING columnar;
SELECT create_distributed_table('columnar_table', 'a', colocate_with:='none');
SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHERE table_name::text = 'columnar_table';
SELECT alter_distributed_table('columnar_table', shard_count:=6);
SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHERE table_name::text = 'columnar_table';
\endif
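With the \if/\endif guard gone, the columnar block above always runs. Its access_method column comes from the citus_tables view, but the same fact can be read straight from the system catalogs; a sketch, assuming columnar_table is visible on the search_path:

-- sketch: resolve a table's access method via pg_class and pg_am
SELECT am.amname
FROM pg_class c JOIN pg_am am ON am.oid = c.relam
WHERE c.relname = 'columnar_table';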
-- test complex cascade operations

View File

@ -4,14 +4,6 @@ CREATE TABLE alter_am_pg_version_table (a INT);
SELECT alter_table_set_access_method('alter_am_pg_version_table', 'columnar');
DROP TABLE alter_am_pg_version_table;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
\gset
\if :server_version_above_eleven
\else
\q
\endif
CREATE SCHEMA alter_table_set_access_method;
SET search_path TO alter_table_set_access_method;

View File

@ -2,10 +2,6 @@
-- Test the TRUNCATE TABLE command for columnar tables.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
-- CREATE a columnar table, fill with some data --
CREATE TABLE columnar_truncate_test (a int, b int) USING columnar;
CREATE TABLE columnar_truncate_test_second (a int, b int) USING columnar;
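For orientation, the flow this file exercises is the ordinary TRUNCATE path applied to columnar storage; a minimal sketch of the pattern, using a hypothetical table name to avoid clashing with the test's own tables:

-- sketch: TRUNCATE on a columnar table behaves as it does on heap
CREATE TABLE columnar_truncate_sketch (a int, b int) USING columnar;
INSERT INTO columnar_truncate_sketch SELECT i, i FROM generate_series(1, 10) i;
TRUNCATE columnar_truncate_sketch;
SELECT count(*) FROM columnar_truncate_sketch;  -- expect 0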

Some files were not shown because too many files have changed in this diff.