Remove 9.6 (#2554)

Removes support and code for PostgreSQL 9.6

cr: @velioglu
pull/2585/head
Jason Petersen 2019-01-16 13:11:24 -07:00 committed by GitHub
parent 0de756559c
commit 339e6e661e
62 changed files with 83 additions and 14704 deletions
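A representative sketch of the pattern this commit deletes across the tree (illustrative only, not part of the diff): call sites that differ between PostgreSQL 9.6 and 10+ were wrapped in compile-time PG_VERSION_NUM guards, and the commit keeps only the 10+ branch. The example below is modeled on the FileWrite() call sites in the hunks that follow; the surrounding variables are assumed from that context.

#if (PG_VERSION_NUM >= 100000)
	/* PG 10+ added a wait-event argument to FileWrite() */
	int appended = FileWrite(fileDesc, copyData->data, copyData->len, PG_WAIT_IO);
#else
	/* PG 9.6 signature, removed by this commit */
	int appended = FileWrite(fileDesc, copyData->data, copyData->len);
#endif

After the change, only the four-argument call remains, with no version guard.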

.gitattributes vendored

@@ -26,7 +26,6 @@ configure -whitespace
 # except these exceptions...
 src/backend/distributed/utils/citus_outfuncs.c -citus-style
-src/backend/distributed/utils/ruleutils_96.c -citus-style
 src/backend/distributed/utils/ruleutils_10.c -citus-style
 src/backend/distributed/utils/ruleutils_11.c -citus-style
 src/include/distributed/citus_nodes.h -citus-style

configure vendored

@@ -2530,7 +2530,7 @@ if test -z "$version_num"; then
 as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5
 fi
-if test "$version_num" != '9.6' -a "$version_num" != '10' -a "$version_num" != '11'; then
+if test "$version_num" != '10' -a "$version_num" != '11'; then
 as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
 else
 { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5


@@ -74,7 +74,7 @@ if test -z "$version_num"; then
 AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.])
 fi
-if test "$version_num" != '9.6' -a "$version_num" != '10' -a "$version_num" != '11'; then
+if test "$version_num" != '10' -a "$version_num" != '11'; then
 AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
 else
 AC_MSG_NOTICE([building against PostgreSQL $version_num])


@@ -1187,18 +1187,14 @@ CreateTruncateTrigger(Oid relationId)
 /*
 * RegularTable function returns true if given table's relation kind is RELKIND_RELATION
-* (or RELKIND_PARTITIONED_TABLE for PG >= 10), otherwise it returns false.
+* or RELKIND_PARTITIONED_TABLE otherwise it returns false.
 */
 bool
 RegularTable(Oid relationId)
 {
 char relationKind = get_rel_relkind(relationId);
-#if (PG_VERSION_NUM >= 100000)
 if (relationKind == RELKIND_RELATION || relationKind == RELKIND_PARTITIONED_TABLE)
-#else
-if (relationKind == RELKIND_RELATION)
-#endif
 {
 return true;
 }
@@ -1386,12 +1382,11 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
 /*
 * RelationUsesIdentityColumns returns whether a given relation uses the SQL
-* GENERATED ... AS IDENTITY features supported as of PostgreSQL 10.
+* GENERATED ... AS IDENTITY features introduced as of PostgreSQL 10.
 */
 static bool
 RelationUsesIdentityColumns(TupleDesc relationDesc)
 {
-#if (PG_VERSION_NUM >= 100000)
 int attributeIndex = 0;
 for (attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++)
@@ -1403,7 +1398,6 @@ RelationUsesIdentityColumns(TupleDesc relationDesc)
 return true;
 }
 }
-#endif
 return false;
 }


@@ -424,7 +424,6 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
 }
 /* initialize copy state to read from COPY data source */
-#if (PG_VERSION_NUM >= 100000)
 copyState = BeginCopyFrom(NULL,
 copiedDistributedRelation,
 copyStatement->filename,
@@ -432,13 +431,6 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
 NULL,
 copyStatement->attlist,
 copyStatement->options);
-#else
-copyState = BeginCopyFrom(copiedDistributedRelation,
-copyStatement->filename,
-copyStatement->is_program,
-copyStatement->attlist,
-copyStatement->options);
-#endif
 /* set up callback to identify error line number */
 errorCallback.callback = CopyFromErrorCallback;
@@ -533,7 +525,6 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)
 (ShardConnections *) palloc0(sizeof(ShardConnections));
 /* initialize copy state to read from COPY data source */
-#if (PG_VERSION_NUM >= 100000)
 CopyState copyState = BeginCopyFrom(NULL,
 distributedRelation,
 copyStatement->filename,
@@ -541,13 +532,6 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)
 NULL,
 copyStatement->attlist,
 copyStatement->options);
-#else
-CopyState copyState = BeginCopyFrom(distributedRelation,
-copyStatement->filename,
-copyStatement->is_program,
-copyStatement->attlist,
-copyStatement->options);
-#endif
 CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
 copyOutState->delim = (char *) delimiterCharacter;
@@ -2232,11 +2216,7 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
 copyStatement->relation = makeRangeVar(NULL, copyDest->intermediateResultIdPrefix,
 -1);
-#if (PG_VERSION_NUM >= 100000)
 formatResultOption = makeDefElem("format", (Node *) makeString("result"), -1);
-#else
-formatResultOption = makeDefElem("format", (Node *) makeString("result"));
-#endif
 copyStatement->options = list_make1(formatResultOption);
 }
 else
@@ -2639,14 +2619,10 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
 List *queryTreeList = NIL;
 StringInfo userFilePath = makeStringInfo();
-#if (PG_VERSION_NUM >= 100000)
 RawStmt *rawStmt = makeNode(RawStmt);
 rawStmt->stmt = queryNode;
 queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
-#else
-queryTreeList = pg_analyze_and_rewrite(queryNode, queryString, NULL, 0);
-#endif
 if (list_length(queryTreeList) != 1)
 {


@@ -26,9 +26,6 @@
 #include "utils/relcache.h"
-/* Local functions forward declarations for helper functions */
-static char * GetSchemaNameFromDropObject(ListCell *dropSchemaCell);
 /*
 * ProcessDropSchemaStmt invalidates the foreign key cache if any table created
 * under dropped schema involved in any foreign key relationship.
@@ -52,7 +49,9 @@ ProcessDropSchemaStmt(DropStmt *dropStatement)
 foreach(dropSchemaCell, dropStatement->objects)
 {
-char *schemaString = GetSchemaNameFromDropObject(dropSchemaCell);
+Value *schemaValue = (Value *) lfirst(dropSchemaCell);
+char *schemaString = strVal(schemaValue);
 Oid namespaceOid = get_namespace_oid(schemaString, true);
 if (namespaceOid == InvalidOid)
@@ -135,25 +134,3 @@ PlanAlterObjectSchemaStmt(AlterObjectSchemaStmt *alterObjectSchemaStmt,
 return NIL;
 }
-/*
-* GetSchemaNameFromDropObject gets the name of the drop schema from given
-* list cell. This function is defined due to API change between PG 9.6 and
-* PG 10.
-*/
-static char *
-GetSchemaNameFromDropObject(ListCell *dropSchemaCell)
-{
-char *schemaString = NULL;
-#if (PG_VERSION_NUM >= 100000)
-Value *schemaValue = (Value *) lfirst(dropSchemaCell);
-schemaString = strVal(schemaValue);
-#else
-List *schemaNameList = (List *) lfirst(dropSchemaCell);
-schemaString = NameListToString(schemaNameList);
-#endif
-return schemaString;
-}


@@ -123,13 +123,12 @@ ProcessDropTableStmt(DropStmt *dropTableStatement)
 * CreateDistributedTable will attach it to its parent table automatically after
 * distributing it.
 *
-* This function does nothing if PostgreSQL's version is less then 10 and given
-* CreateStmt is not a CREATE TABLE ... PARTITION OF command.
+* This function does nothing if the provided CreateStmt is not a CREATE TABLE ...
+* PARTITION OF command.
 */
 void
 ProcessCreateTableStmtPartitionOf(CreateStmt *createStatement)
 {
-#if (PG_VERSION_NUM >= 100000)
 if (createStatement->inhRelations != NIL && createStatement->partbound != NULL)
 {
 RangeVar *parentRelation = linitial(createStatement->inhRelations);
@@ -161,7 +160,6 @@ ProcessCreateTableStmtPartitionOf(CreateStmt *createStatement)
 viaDeprecatedAPI);
 }
 }
-#endif
 }
@@ -188,13 +186,12 @@ ProcessCreateTableStmtPartitionOf(CreateStmt *createStatement)
 * operation will be performed via propagating this ALTER TABLE ... ATTACH
 * PARTITION command to workers.
 *
-* This function does nothing if PostgreSQL's version is less then 10 and given
-* CreateStmt is not a ALTER TABLE ... ATTACH PARTITION OF command.
+* This function does nothing if the provided CreateStmt is not an ALTER TABLE ...
+* ATTACH PARTITION OF command.
 */
 void
 ProcessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement)
 {
-#if (PG_VERSION_NUM >= 100000)
 List *commandList = alterTableStatement->cmds;
 ListCell *commandCell = NULL;
@@ -240,7 +237,6 @@ ProcessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement)
 }
 }
 }
-#endif
 }
@@ -383,7 +379,6 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo
 }
 }
 }
-#if (PG_VERSION_NUM >= 100000)
 else if (alterTableType == AT_AttachPartition)
 {
 PartitionCmd *partitionCommand = (PartitionCmd *) command->def;
@@ -418,7 +413,7 @@ PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCo
 rightRelationId = RangeVarGetRelid(partitionCommand->name, NoLock, false);
 }
-#endif
 executeSequentially |= SetupExecutionModeForAlterTable(leftRelationId,
 command);
 }
@@ -990,7 +985,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 break;
 }
-#if (PG_VERSION_NUM >= 100000)
 case AT_AttachPartition:
 {
 Oid relationId = AlterTableLookupRelation(alterTableStatement,
@@ -1037,7 +1031,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
 break;
 }
-#endif
 case AT_DropConstraint:
 {
 LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);


@@ -57,12 +57,8 @@ RedirectCopyDataToRegularFile(const char *filename)
 /* if received data has contents, append to regular file */
 if (copyData->len > 0)
 {
-#if (PG_VERSION_NUM >= 100000)
 int appended = FileWrite(fileDesc, copyData->data, copyData->len,
 PG_WAIT_IO);
-#else
-int appended = FileWrite(fileDesc, copyData->data, copyData->len);
-#endif
 if (appended != copyData->len)
 {
@@ -107,12 +103,7 @@ SendRegularFile(const char *filename)
 SendCopyOutStart();
-#if (PG_VERSION_NUM >= 100000)
 readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize, PG_WAIT_IO);
-#else
-readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize);
-#endif
 while (readBytes > 0)
 {
 fileBuffer->len = readBytes;
@@ -120,12 +111,8 @@ SendRegularFile(const char *filename)
 SendCopyData(fileBuffer);
 resetStringInfo(fileBuffer);
-#if (PG_VERSION_NUM >= 100000)
 readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize,
 PG_WAIT_IO);
-#else
-readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize);
-#endif
 }
 SendCopyDone();


@@ -67,45 +67,19 @@ static void PostProcessUtility(Node *parsetree);
 /*
-* multi_ProcessUtility9x is the 9.x-compatible wrapper for Citus' main utility
-* hook. It simply adapts the old-style hook to call into the new-style (10+)
-* hook, which is what now houses all actual logic.
-*/
-void
-multi_ProcessUtility9x(Node *parsetree,
-const char *queryString,
-ProcessUtilityContext context,
-ParamListInfo params,
-DestReceiver *dest,
-char *completionTag)
-{
-PlannedStmt *plannedStmt = makeNode(PlannedStmt);
-plannedStmt->commandType = CMD_UTILITY;
-plannedStmt->utilityStmt = parsetree;
-multi_ProcessUtility(plannedStmt, queryString, context, params, NULL, dest,
-completionTag);
-}
-/*
-* CitusProcessUtility is a version-aware wrapper of ProcessUtility to account
-* for argument differences between the 9.x and 10+ PostgreSQL versions.
+* CitusProcessUtility is a convenience method to create a PlannedStmt out of pieces of a
+* utility statement before invoking ProcessUtility.
 */
 void
 CitusProcessUtility(Node *node, const char *queryString, ProcessUtilityContext context,
 ParamListInfo params, DestReceiver *dest, char *completionTag)
 {
-#if (PG_VERSION_NUM >= 100000)
 PlannedStmt *plannedStmt = makeNode(PlannedStmt);
 plannedStmt->commandType = CMD_UTILITY;
 plannedStmt->utilityStmt = node;
 ProcessUtility(plannedStmt, queryString, context, params, NULL, dest,
 completionTag);
-#else
-ProcessUtility(node, queryString, context, params, dest, completionTag);
-#endif
 }
@@ -139,13 +113,8 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 * that state. Since we never need to intercept transaction statements,
 * skip our checks and immediately fall into standard_ProcessUtility.
 */
-#if (PG_VERSION_NUM >= 100000)
 standard_ProcessUtility(pstmt, queryString, context,
 params, queryEnv, dest, completionTag);
-#else
-standard_ProcessUtility(parsetree, queryString, context,
-params, dest, completionTag);
-#endif
 return;
 }
@@ -163,26 +132,18 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 * Ensure that utility commands do not behave any differently until CREATE
 * EXTENSION is invoked.
 */
-#if (PG_VERSION_NUM >= 100000)
 standard_ProcessUtility(pstmt, queryString, context,
 params, queryEnv, dest, completionTag);
-#else
-standard_ProcessUtility(parsetree, queryString, context,
-params, dest, completionTag);
-#endif
 return;
 }
-#if (PG_VERSION_NUM >= 100000)
 if (IsA(parsetree, CreateSubscriptionStmt))
 {
 CreateSubscriptionStmt *createSubStmt = (CreateSubscriptionStmt *) parsetree;
 parsetree = ProcessCreateSubscriptionStmt(createSubStmt);
 }
-#endif
 #if (PG_VERSION_NUM >= 110000)
 if (IsA(parsetree, CallStmt))
 {
@@ -457,15 +418,9 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 }
 }
-#if (PG_VERSION_NUM >= 100000)
 pstmt->utilityStmt = parsetree;
 standard_ProcessUtility(pstmt, queryString, context,
 params, queryEnv, dest, completionTag);
-#else
-standard_ProcessUtility(parsetree, queryString, context,
-params, dest, completionTag);
-#endif
 /*
 * We only process CREATE TABLE ... PARTITION OF commands in the function below


@@ -313,25 +313,6 @@ ReportResultError(MultiConnection *connection, PGresult *result, int elevel)
 }
-/* *INDENT-OFF* */
-#if (PG_VERSION_NUM < 100000)
-/*
-* Make copy of string with all trailing newline characters removed.
-*/
-char *
-pchomp(const char *in)
-{
-size_t n;
-n = strlen(in);
-while (n > 0 && in[n - 1] == '\n')
-n--;
-return pnstrdup(in, n);
-}
-#endif
 /* *INDENT-ON* */
@@ -712,12 +693,7 @@ FinishConnectionIO(MultiConnection *connection, bool raiseInterrupts)
 return true;
 }
-#if (PG_VERSION_NUM >= 100000)
 rc = WaitLatchOrSocket(MyLatch, waitFlags, socket, 0, PG_WAIT_EXTENSION);
-#else
-rc = WaitLatchOrSocket(MyLatch, waitFlags, socket, 0);
-#endif
 if (rc & WL_POSTMASTER_DEATH)
 {
 ereport(ERROR, (errmsg("postmaster was shut down, exiting")));
@@ -806,10 +782,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts)
 int pendingConnectionCount = totalConnectionCount -
 pendingConnectionsStartIndex;
-/*
-* We cannot disable wait events as of postgres 9.6, so we rebuild the
-* WaitEventSet whenever connections are ready.
-*/
+/* rebuild the WaitEventSet whenever connections are ready */
 if (rebuildWaitEventSet)
 {
 if (waitEventSet != NULL)
@@ -824,13 +797,8 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts)
 }
 /* wait for I/O events */
-#if (PG_VERSION_NUM >= 100000)
 eventCount = WaitEventSetWait(waitEventSet, timeout, events,
 pendingConnectionCount, WAIT_EVENT_CLIENT_READ);
-#else
-eventCount = WaitEventSetWait(waitEventSet, timeout, events,
-pendingConnectionCount);
-#endif
 /* process I/O events */
 for (; eventIndex < eventCount; eventIndex++)


@@ -412,11 +412,7 @@ RemoteFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)
 static void
 WriteToLocalFile(StringInfo copyData, File fileDesc)
 {
-#if (PG_VERSION_NUM >= 100000)
 int bytesWritten = FileWrite(fileDesc, copyData->data, copyData->len, PG_WAIT_IO);
-#else
-int bytesWritten = FileWrite(fileDesc, copyData->data, copyData->len);
-#endif
 if (bytesWritten < 0)
 {
 ereport(ERROR, (errcode_for_file_access(),


@@ -270,21 +270,12 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript
 DefElem *copyOption = NULL;
 List *copyOptions = NIL;
-#if (PG_VERSION_NUM >= 100000)
 int location = -1; /* "unknown" token location */
 copyOption = makeDefElem("format", (Node *) makeString(copyFormat), location);
-#else
-copyOption = makeDefElem("format", (Node *) makeString(copyFormat));
-#endif
 copyOptions = lappend(copyOptions, copyOption);
-#if (PG_VERSION_NUM >= 100000)
 copyState = BeginCopyFrom(NULL, stubRelation, fileName, false, NULL,
 NULL, copyOptions);
-#else
-copyState = BeginCopyFrom(stubRelation, fileName, false, NULL,
-copyOptions);
-#endif
 while (true)
 {
@@ -351,14 +342,8 @@ Query *
 ParseQueryString(const char *queryString)
 {
 Query *query = NULL;
-#if (PG_VERSION_NUM >= 100000)
 RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);
 List *queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
-#else
-Node *queryTreeNode = ParseTreeNode(queryString);
-List *queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0);
-#endif
 if (list_length(queryTreeList) != 1)
 {
@@ -416,11 +401,7 @@ ExecutePlanIntoDestReceiver(PlannedStmt *queryPlan, ParamListInfo params,
 NULL);
 PortalStart(portal, params, eflags, GetActiveSnapshot());
-#if (PG_VERSION_NUM >= 100000)
 PortalRun(portal, count, false, true, dest, dest, NULL);
-#else
-PortalRun(portal, count, false, dest, dest, NULL);
-#endif
 PortalDrop(portal, false);
 }


@@ -58,9 +58,7 @@
 #include "utils/elog.h"
 #include "utils/errcodes.h"
 #include "utils/lsyscache.h"
-#if (PG_VERSION_NUM >= 100000)
 #include "utils/varlena.h"
-#endif
 /* Local functions forward declarations */
@@ -112,12 +110,8 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 LOCKMODE lockMode = 0;
 char partitionMethod = 0;
 bool failOK = false;
-#if (PG_VERSION_NUM >= 100000)
 RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);
 queryTreeNode = rawStmt->stmt;
-#else
-queryTreeNode = ParseTreeNode(queryString);
-#endif
 EnsureCoordinator();
 CheckCitusVersion(ERROR);
@@ -152,11 +146,7 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 CheckDistributedTable(relationId);
 EnsureTablePermissions(relationId, ACL_DELETE);
-#if (PG_VERSION_NUM >= 100000)
 queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
-#else
-queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0);
-#endif
 deleteQuery = (Query *) linitial(queryTreeList);
 CheckTableCount(deleteQuery);
@@ -593,11 +583,7 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList,
 restrictInfoList = lappend(restrictInfoList, lessThanRestrictInfo);
 restrictInfoList = lappend(restrictInfoList, greaterThanRestrictInfo);
-#if (PG_VERSION_NUM >= 100000)
 dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList, false);
-#else
-dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList);
-#endif
 if (dropShard)
 {
 dropShardIntervalList = lappend(dropShardIntervalList, shardInterval);


@@ -92,12 +92,8 @@ master_modify_multiple_shards(PG_FUNCTION_ARGS)
 CmdType operation = CMD_UNKNOWN;
 TaskType taskType = TASK_TYPE_INVALID_FIRST;
 bool truncateOperation = false;
-#if (PG_VERSION_NUM >= 100000)
 RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);
 queryTreeNode = rawStmt->stmt;
-#else
-queryTreeNode = ParseTreeNode(queryString);
-#endif
 CheckCitusVersion(ERROR);
@@ -152,11 +148,7 @@ master_modify_multiple_shards(PG_FUNCTION_ARGS)
 CheckDistributedTable(relationId);
-#if (PG_VERSION_NUM >= 100000)
 queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
-#else
-queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0);
-#endif
 modifyQuery = (Query *) linitial(queryTreeList);
 operation = modifyQuery->commandType;


@@ -61,9 +61,7 @@
 #include "utils/relcache.h"
 #include "utils/ruleutils.h"
 #include "utils/tqual.h"
-#if (PG_VERSION_NUM >= 100000)
 #include "utils/varlena.h"
-#endif
 /* Shard related configuration */


@@ -24,9 +24,7 @@
 #include "commands/tablecmds.h"
 #include "catalog/indexing.h"
 #include "catalog/namespace.h"
-#if (PG_VERSION_NUM >= 100000)
 #include "catalog/partition.h"
-#endif
 #include "distributed/citus_ruleutils.h"
 #include "distributed/colocation_utils.h"
 #include "distributed/commands.h"


@@ -21,8 +21,6 @@
 #include "libpq/hba.h"
 #if (PG_VERSION_NUM >= 100000)
 #include "common/ip.h"
-#else
-#include "libpq/ip.h"
 #endif
 #include "libpq/libpq-be.h"
 #include "postmaster/postmaster.h"


@@ -918,11 +918,7 @@ List *
 SequenceDDLCommandsForTable(Oid relationId)
 {
 List *sequenceDDLList = NIL;
-#if (PG_VERSION_NUM >= 100000)
 List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber);
-#else
-List *ownedSequences = getOwnedSequences(relationId);
-#endif
 ListCell *listCell;
 char *ownerName = TableOwner(relationId);
@@ -1008,7 +1004,6 @@ EnsureSupportedSequenceColumnType(Oid sequenceOid)
 bool hasMetadataWorkers = HasMetadataWorkers();
 /* call sequenceIsOwned in order to get the tableId and columnId */
-#if (PG_VERSION_NUM >= 100000)
 bool sequenceOwned = sequenceIsOwned(sequenceOid, DEPENDENCY_AUTO, &tableId,
 &columnId);
 if (!sequenceOwned)
@@ -1018,9 +1013,6 @@ EnsureSupportedSequenceColumnType(Oid sequenceOid)
 }
 Assert(sequenceOwned);
-#else
-sequenceIsOwned(sequenceOid, &tableId, &columnId);
-#endif
 shouldSyncMetadata = ShouldSyncTableMetadata(tableId);


@@ -337,7 +337,6 @@ AdjustPartitioningForDistributedPlanning(Query *queryTree,
 {
 rangeTableEntry->inh = setPartitionedTablesInherited;
-#if (PG_VERSION_NUM >= 100000)
 if (setPartitionedTablesInherited)
 {
 rangeTableEntry->relkind = RELKIND_PARTITIONED_TABLE;
@@ -346,7 +345,6 @@ AdjustPartitioningForDistributedPlanning(Query *queryTree,
 {
 rangeTableEntry->relkind = RELKIND_RELATION;
 }
-#endif
 }
 }
 }


@@ -87,15 +87,10 @@ static void ExplainTaskPlacement(ShardPlacement *taskPlacement, List *explainOut
 static StringInfo BuildRemoteExplainQuery(char *queryString, ExplainState *es);
 /* Static Explain functions copied from explain.c */
-#if (PG_VERSION_NUM >= 100000)
 static void ExplainOneQuery(Query *query, int cursorOptions,
 IntoClause *into, ExplainState *es,
 const char *queryString, ParamListInfo params,
 QueryEnvironment *queryEnv);
-#else
-static void ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
-const char *queryString, ParamListInfo params);
-#endif
 #if (PG_VERSION_NUM < 110000)
 static void ExplainOpenGroup(const char *objtype, const char *labelname,
 bool labeled, ExplainState *es);
@@ -165,11 +160,7 @@ CoordinatorInsertSelectExplainScan(CustomScanState *node, List *ancestors,
 ExplainOpenGroup("Select Query", "Select Query", false, es);
 /* explain the inner SELECT query */
-#if (PG_VERSION_NUM >= 100000)
 ExplainOneQuery(query, 0, into, es, queryString, params, NULL);
-#else
-ExplainOneQuery(query, into, es, queryString, params);
-#endif
 ExplainCloseGroup("Select Query", "Select Query", false, es);
 }
@@ -211,11 +202,7 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
 INSTR_TIME_SET_CURRENT(planduration);
 INSTR_TIME_SUBTRACT(planduration, planduration);
-#if (PG_VERSION_NUM >= 100000)
 ExplainOnePlan(plan, into, es, queryString, params, NULL, &planduration);
-#else
-ExplainOnePlan(plan, into, es, queryString, params, &planduration);
-#endif
 if (es->format == EXPLAIN_FORMAT_TEXT)
 {
@@ -654,15 +641,10 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es)
 * "into" is NULL unless we are explaining the contents of a CreateTableAsStmt.
 */
 static void
-#if (PG_VERSION_NUM >= 100000)
 ExplainOneQuery(Query *query, int cursorOptions,
 IntoClause *into, ExplainState *es,
 const char *queryString, ParamListInfo params,
 QueryEnvironment *queryEnv)
-#else
-ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
-const char *queryString, ParamListInfo params)
-#endif
 {
 /* if an advisor plugin is present, let it manage things */
 if (ExplainOneQuery_hook)
@@ -672,8 +654,6 @@ ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
 #elif (PG_VERSION_NUM >= 100000)
 (*ExplainOneQuery_hook) (query, cursorOptions, into, es,
 queryString, params);
-#else
-(*ExplainOneQuery_hook) (query, into, es, queryString, params);
 #endif
 else
 {
@@ -684,22 +664,14 @@ ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
 INSTR_TIME_SET_CURRENT(planstart);
 /* plan the query */
-#if (PG_VERSION_NUM >= 100000)
 plan = pg_plan_query(query, cursorOptions, params);
-#else
-plan = pg_plan_query(query, into ? 0 : CURSOR_OPT_PARALLEL_OK, params);
-#endif
 INSTR_TIME_SET_CURRENT(planduration);
 INSTR_TIME_SUBTRACT(planduration, planstart);
 /* run it (if needed) and produce output */
-#if (PG_VERSION_NUM >= 100000)
 ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
 &planduration);
-#else
-ExplainOnePlan(plan, into, es, queryString, params, &planduration);
-#endif
 }
 }


@@ -1953,12 +1953,8 @@ MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType,
 * will convert the types of the aggregates if necessary.
 */
 operatorNameList = list_make1(makeString(DIVISION_OPER_NAME));
-#if (PG_VERSION_NUM >= 100000)
 opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, NULL,
 -1);
-#else
-opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, -1);
-#endif
 return opExpr;
 }


@@ -142,9 +142,7 @@ static bool MultiRouterPlannableQuery(Query *query,
 static DeferredErrorMessage * ErrorIfQueryHasModifyingCTE(Query *queryTree);
 static RangeTblEntry * GetUpdateOrDeleteRTE(Query *query);
 static bool SelectsFromDistributedTable(List *rangeTableList, Query *query);
-#if (PG_VERSION_NUM >= 100000)
 static List * get_all_actual_clauses(List *restrictinfo_list);
-#endif
 static int CompareInsertValuesByShardId(const void *leftElement,
 const void *rightElement);
 static uint64 GetInitialShardId(List *relationShardList);
@@ -1294,13 +1292,8 @@ TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTre
 rightConst->constisnull = newValue->constisnull;
 rightConst->constbyval = newValue->constbyval;
-#if (PG_VERSION_NUM >= 100000)
 predicateIsImplied = predicate_implied_by(list_make1(equalityExpr),
 restrictClauseList, false);
-#else
-predicateIsImplied = predicate_implied_by(list_make1(equalityExpr),
-restrictClauseList);
-#endif
 if (predicateIsImplied)
 {
 /* target entry of the form SET col = <x> WHERE col = <x> AND ... */
@@ -2518,13 +2511,10 @@ NormalizeMultiRowInsertTargetList(Query *query)
 valuesListCell->data.ptr_value = (void *) expandedValuesList;
 }
-#if (PG_VERSION_NUM >= 100000)
 /* reset coltypes, coltypmods, colcollations and rebuild them below */
 valuesRTE->coltypes = NIL;
 valuesRTE->coltypmods = NIL;
 valuesRTE->colcollations = NIL;
-#endif
 foreach(targetEntryCell, query->targetList)
 {
@@ -2544,11 +2534,9 @@ NormalizeMultiRowInsertTargetList(Query *query)
 targetTypmod = exprTypmod(targetExprNode);
 targetColl = exprCollation(targetExprNode);
-#if (PG_VERSION_NUM >= 100000)
 valuesRTE->coltypes = lappend_oid(valuesRTE->coltypes, targetType);
 valuesRTE->coltypmods = lappend_int(valuesRTE->coltypmods, targetTypmod);
 valuesRTE->colcollations = lappend_oid(valuesRTE->colcollations, targetColl);
-#endif
 if (IsA(targetExprNode, Var))
 {
@@ -2996,8 +2984,6 @@ ErrorIfQueryHasModifyingCTE(Query *queryTree)
 }
-#if (PG_VERSION_NUM >= 100000)
 /*
 * get_all_actual_clauses
 *
@@ -3024,9 +3010,6 @@ get_all_actual_clauses(List *restrictinfo_list)
 }
-#endif
 /*
 * CompareInsertValuesByShardId does what it says in the name. Used for sorting
 * InsertValues objects by their shard.


@@ -547,7 +547,6 @@ RelayEventExtendNamesForInterShardCommands(Node *parseTree, uint64 leftShardId,
 }
 }
 }
-#if (PG_VERSION_NUM >= 100000)
 else if (command->subtype == AT_AttachPartition ||
 command->subtype == AT_DetachPartition)
 {
@@ -556,7 +555,6 @@ RelayEventExtendNamesForInterShardCommands(Node *parseTree, uint64 leftShardId,
 referencedTableName = &(partitionCommand->name->relname);
 relationSchemaName = &(partitionCommand->name->schemaname);
 }
-#endif
 else
 {
 continue;


@@ -203,11 +203,7 @@ _PG_init(void)
 planner_hook = distributed_planner;
 /* register utility hook */
-#if (PG_VERSION_NUM >= 100000)
 ProcessUtility_hook = multi_ProcessUtility;
-#else
-ProcessUtility_hook = multi_ProcessUtility9x;
-#endif
 /* register for planner hook */
 set_rel_pathlist_hook = multi_relation_restriction_hook;


@@ -52,12 +52,8 @@ deparse_shard_query_test(PG_FUNCTION_ARGS)
 ListCell *queryTreeCell = NULL;
 List *queryTreeList = NIL;
-#if (PG_VERSION_NUM >= 100000)
 queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar,
 NULL, 0, NULL);
-#else
-queryTreeList = pg_analyze_and_rewrite(parsetree, queryStringChar, NULL, 0);
-#endif
 foreach(queryTreeCell, queryTreeList)
 {


@@ -271,12 +271,8 @@ relation_count_in_query(PG_FUNCTION_ARGS)
 ListCell *queryTreeCell = NULL;
 List *queryTreeList = NIL;
-#if (PG_VERSION_NUM >= 100000)
 queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar,
 NULL, 0, NULL);
-#else
-queryTreeList = pg_analyze_and_rewrite(parsetree, queryStringChar, NULL, 0);
-#endif
 foreach(queryTreeCell, queryTreeList)
 {


@@ -37,9 +37,7 @@ generate_alter_table_detach_partition_command(PG_FUNCTION_ARGS)
 {
 char *command = "";
-#if (PG_VERSION_NUM >= 100000)
 command = GenerateDetachPartitionCommand(PG_GETARG_OID(0));
-#endif
 PG_RETURN_TEXT_P(cstring_to_text(command));
 }
@@ -53,9 +51,7 @@ generate_alter_table_attach_partition_command(PG_FUNCTION_ARGS)
 {
 char *command = "";
-#if (PG_VERSION_NUM >= 100000)
 command = GenerateAlterTableAttachPartitionCommand(PG_GETARG_OID(0));
-#endif
 PG_RETURN_TEXT_P(cstring_to_text(command));
 }
@@ -69,9 +65,7 @@ generate_partition_information(PG_FUNCTION_ARGS)
 {
 char *command = "";
-#if (PG_VERSION_NUM >= 100000)
 command = GeneratePartitioningInformation(PG_GETARG_OID(0));
-#endif
 PG_RETURN_TEXT_P(cstring_to_text(command));
 }
@@ -85,7 +79,6 @@ print_partitions(PG_FUNCTION_ARGS)
 {
 StringInfo resultRelationNames = makeStringInfo();
-#if (PG_VERSION_NUM >= 100000)
 List *partitionList = PartitionList(PG_GETARG_OID(0));
 ListCell *partitionOidCell = NULL;
@@ -103,7 +96,6 @@ print_partitions(PG_FUNCTION_ARGS)
 appendStringInfoString(resultRelationNames, get_rel_name(partitionOid));
 }
-#endif
 PG_RETURN_TEXT_P(cstring_to_text(resultRelationNames->data));
 }


@@ -46,11 +46,7 @@
 typedef struct BackendManagementShmemData
 {
 int trancheId;
-#if (PG_VERSION_NUM >= 100000)
 NamedLWLockTranche namedLockTranche;
-#else
-LWLockTranche lockTranche;
-#endif
 LWLock lock;
 /*
@@ -554,36 +550,18 @@ BackendManagementShmemInit(void)
 int totalProcs = 0;
 char *trancheName = "Backend Management Tranche";
-#if (PG_VERSION_NUM >= 100000)
 NamedLWLockTranche *namedLockTranche =
 &backendManagementShmemData->namedLockTranche;
-#else
-LWLockTranche *lockTranche = &backendManagementShmemData->lockTranche;
-#endif
 /* start by zeroing out all the memory */
 memset(backendManagementShmemData, 0,
 BackendManagementShmemSize());
-#if (PG_VERSION_NUM >= 100000)
 namedLockTranche->trancheId = LWLockNewTrancheId();
 LWLockRegisterTranche(namedLockTranche->trancheId, trancheName);
 LWLockInitialize(&backendManagementShmemData->lock,
 namedLockTranche->trancheId);
-#else
-backendManagementShmemData->trancheId = LWLockNewTrancheId();
-/* we only need a single lock */
-lockTranche->array_base = &backendManagementShmemData->lock;
-lockTranche->array_stride = sizeof(LWLock);
-lockTranche->name = trancheName;
-LWLockRegisterTranche(backendManagementShmemData->trancheId, lockTranche);
-LWLockInitialize(&backendManagementShmemData->lock,
-backendManagementShmemData->trancheId);
-#endif
 /* start the distributed transaction ids from 1 */
 pg_atomic_init_u64(&backendManagementShmemData->nextTransactionNumber, 1);


@@ -36,9 +36,7 @@
 #include "storage/spin.h"
 #include "storage/s_lock.h"
 #include "utils/builtins.h"
-#if PG_VERSION_NUM >= 100000
 #include "utils/fmgrprotos.h"
-#endif
 #include "utils/inet.h"
 #include "utils/timestamp.h"
@@ -121,12 +119,8 @@
 * We get the query_host_name and query_host_port while opening the connection to
 * the node. We also replace initiator_node_identifier with initiator_node_host
 * and initiator_node_port. Thus, they are not in the query below.
-*
-* Also, backend_type introduced with pg 10+ so we have null in the previous verions.
 */
-#if PG_VERSION_NUM >= 100000
 #define CITUS_DIST_STAT_ACTIVITY_QUERY \
 "\
 SELECT \
@@ -196,78 +190,6 @@
 pg_stat_activity.application_name = 'citus' \
 AND \
 pg_stat_activity.query NOT ILIKE '%stat_activity%';"
-#else
-#define CITUS_DIST_STAT_ACTIVITY_QUERY \
-"\
-SELECT \
-dist_txs.initiator_node_identifier, \
-dist_txs.transaction_number, \
-dist_txs.transaction_stamp, \
-pg_stat_activity.datid, \
-pg_stat_activity.datname, \
-pg_stat_activity.pid, \
-pg_stat_activity.usesysid, \
-pg_stat_activity.usename, \
-pg_stat_activity.application_name, \
-pg_stat_activity.client_addr, \
-pg_stat_activity.client_hostname, \
-pg_stat_activity.client_port, \
-pg_stat_activity.backend_start, \
-pg_stat_activity.xact_start, \
-pg_stat_activity.query_start, \
-pg_stat_activity.state_change, \
-pg_stat_activity.wait_event_type, \
-pg_stat_activity.wait_event, \
-pg_stat_activity.state, \
-pg_stat_activity.backend_xid, \
-pg_stat_activity.backend_xmin, \
-pg_stat_activity.query, \
-null \
-FROM \
-pg_stat_activity \
-INNER JOIN \
-get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \
-ON pg_stat_activity.pid = dist_txs.process_id \
-WHERE \
-dist_txs.worker_query = false;"
-#define CITUS_WORKER_STAT_ACTIVITY_QUERY \
-"\
-SELECT \
-dist_txs.initiator_node_identifier, \
-dist_txs.transaction_number, \
-dist_txs.transaction_stamp, \
-pg_stat_activity.datid, \
-pg_stat_activity.datname, \
-pg_stat_activity.pid, \
-pg_stat_activity.usesysid, \
-pg_stat_activity.usename, \
-pg_stat_activity.application_name, \
-pg_stat_activity.client_addr, \
-pg_stat_activity.client_hostname, \
-pg_stat_activity.client_port, \
-pg_stat_activity.backend_start, \
-pg_stat_activity.xact_start, \
-pg_stat_activity.query_start, \
-pg_stat_activity.state_change, \
-pg_stat_activity.wait_event_type, \
-pg_stat_activity.wait_event, \
-pg_stat_activity.state, \
-pg_stat_activity.backend_xid, \
-pg_stat_activity.backend_xmin, \
-pg_stat_activity.query, \
-null \
-FROM \
-pg_stat_activity \
-LEFT JOIN \
-get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \
-ON pg_stat_activity.pid = dist_txs.process_id \
-WHERE \
-pg_stat_activity.application_name = 'citus' \
-AND \
-pg_stat_activity.query NOT ILIKE '%stat_activity%';"
-#endif
 typedef struct CitusDistStat
 {


@@ -189,11 +189,7 @@ citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
 /*
 * And evaluate it.
 */
-#if (PG_VERSION_NUM >= 100000)
 const_val = ExecEvalExprSwitchContext(exprstate, econtext, &const_is_null);
-#else
-const_val = ExecEvalExprSwitchContext(exprstate, econtext, &const_is_null, NULL);
-#endif
 /* Get info needed about result datatype */
 get_typlenbyval(result_type, &resultTypLen, &resultTypByVal);
@@ -259,13 +255,11 @@ CitusIsVolatileFunction(Node *node)
 return true;
 }
-#if (PG_VERSION_NUM >= 100000)
 if (IsA(node, NextValueExpr))
 {
 /* NextValueExpr is volatile */
 return true;
 }
-#endif
 return false;
 }
@@ -302,7 +296,6 @@ CitusIsMutableFunction(Node *node)
 return true;
 }
-#if (PG_VERSION_NUM >= 100000)
 if (IsA(node, SQLValueFunction))
 {
 /* all variants of SQLValueFunction are stable */
@@ -314,7 +307,6 @@ CitusIsMutableFunction(Node *node)
 /* NextValueExpr is volatile */
 return true;
 }
-#endif
 return false;
 }


@@ -302,10 +302,8 @@ GetRangeTblKind(RangeTblEntry *rte)
 switch (rte->rtekind)
 {
 /* directly rtekind if it's not possibly an extended RTE */
-#if (PG_VERSION_NUM >= 100000)
 case RTE_TABLEFUNC:
 case RTE_NAMEDTUPLESTORE:
-#endif
 case RTE_RELATION:
 case RTE_SUBQUERY:
 case RTE_JOIN:


@@ -204,17 +204,10 @@ pg_get_sequencedef_string(Oid sequenceRelationId)
 /* build our DDL command */
 qualifiedSequenceName = generate_relation_name(sequenceRelationId, NIL);
-#if (PG_VERSION_NUM >= 100000)
 sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName,
 pgSequenceForm->seqincrement, pgSequenceForm->seqmin,
 pgSequenceForm->seqmax, pgSequenceForm->seqstart,
 pgSequenceForm->seqcycle ? "" : "NO ");
-#else
-sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName,
-pgSequenceForm->increment_by, pgSequenceForm->min_value,
-pgSequenceForm->max_value, pgSequenceForm->start_value,
-pgSequenceForm->is_cycled ? "" : "NO ");
-#endif
 return sequenceDef;
 }
@@ -230,7 +223,6 @@ pg_get_sequencedef(Oid sequenceRelationId)
 Form_pg_sequence pgSequenceForm = NULL;
 HeapTuple heapTuple = NULL;
-#if (PG_VERSION_NUM >= 100000)
 heapTuple = SearchSysCache1(SEQRELID, sequenceRelationId);
 if (!HeapTupleIsValid(heapTuple))
 {
@@ -240,38 +232,6 @@ pg_get_sequencedef(Oid sequenceRelationId)
 pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple);
 ReleaseSysCache(heapTuple);
-#else
-SysScanDesc scanDescriptor = NULL;
-Relation sequenceRel = NULL;
-AclResult permissionCheck = ACLCHECK_NO_PRIV;
-/* open and lock sequence */
-sequenceRel = heap_open(sequenceRelationId, AccessShareLock);
-/* check permissions to read sequence attributes */
-permissionCheck = pg_class_aclcheck(sequenceRelationId, GetUserId(),
-ACL_SELECT | ACL_USAGE);
-if (permissionCheck != ACLCHECK_OK)
-{
-ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-errmsg("permission denied for sequence %s",
-RelationGetRelationName(sequenceRel))));
-}
-/* retrieve attributes from first tuple */
-scanDescriptor = systable_beginscan(sequenceRel, InvalidOid, false, NULL, 0, NULL);
-heapTuple = systable_getnext(scanDescriptor);
-if (!HeapTupleIsValid(heapTuple))
-{
-ereport(ERROR, (errmsg("could not find specified sequence")));
-}
-pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple);
-systable_endscan(scanDescriptor);
-heap_close(sequenceRel, AccessShareLock);
-#endif
 return pgSequenceForm;
 }
@@ -474,13 +434,11 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults)
 appendStringInfo(&buffer, " SERVER %s", quote_identifier(serverName));
 AppendOptionListToString(&buffer, foreignTable->options);
 }
-#if (PG_VERSION_NUM >= 100000)
 else if (relationKind == RELKIND_PARTITIONED_TABLE)
 {
 char *partitioningInformation = GeneratePartitioningInformation(tableRelationId);
 appendStringInfo(&buffer, " PARTITION BY %s ", partitioningInformation);
 }
-#endif
 /*
 * Add any reloptions (storage parameters) defined on the table in a WITH


@ -60,11 +60,7 @@ typedef struct MaintenanceDaemonControlData
* data in MaintenanceDaemonDBHash. * data in MaintenanceDaemonDBHash.
*/ */
int trancheId; int trancheId;
#if (PG_VERSION_NUM >= 100000)
char *lockTrancheName; char *lockTrancheName;
#else
LWLockTranche lockTranche;
#endif
LWLock lock; LWLock lock;
} MaintenanceDaemonControlData; } MaintenanceDaemonControlData;
@ -463,11 +459,7 @@ CitusMaintenanceDaemonMain(Datum main_arg)
* Wait until timeout, or until somebody wakes us up. Also cast the timeout to * Wait until timeout, or until somebody wakes us up. Also cast the timeout to
* integer where we've calculated it using double for not losing the precision. * integer where we've calculated it using double for not losing the precision.
*/ */
#if (PG_VERSION_NUM >= 100000)
rc = WaitLatch(MyLatch, latchFlags, (long) timeout, PG_WAIT_EXTENSION); rc = WaitLatch(MyLatch, latchFlags, (long) timeout, PG_WAIT_EXTENSION);
#else
rc = WaitLatch(MyLatch, latchFlags, (long) timeout);
#endif
/* emergency bailout if postmaster has died */ /* emergency bailout if postmaster has died */
if (rc & WL_POSTMASTER_DEATH) if (rc & WL_POSTMASTER_DEATH)
@ -553,26 +545,10 @@ MaintenanceDaemonShmemInit(void)
*/
if (!alreadyInitialized)
{
#if (PG_VERSION_NUM >= 100000)
MaintenanceDaemonControl->trancheId = LWLockNewTrancheId();
MaintenanceDaemonControl->lockTrancheName = "Citus Maintenance Daemon";
LWLockRegisterTranche(MaintenanceDaemonControl->trancheId,
MaintenanceDaemonControl->lockTrancheName);
#else
/* initialize lwlock */
LWLockTranche *tranche = &MaintenanceDaemonControl->lockTranche;
/* start by zeroing out all the memory */
memset(MaintenanceDaemonControl, 0, MaintenanceDaemonShmemSize());
/* initialize lock */
MaintenanceDaemonControl->trancheId = LWLockNewTrancheId();
tranche->array_base = &MaintenanceDaemonControl->lock;
tranche->array_stride = sizeof(LWLock);
tranche->name = "Citus Maintenance Daemon";
LWLockRegisterTranche(MaintenanceDaemonControl->trancheId, tranche);
#endif
LWLockInitialize(&MaintenanceDaemonControl->lock,
MaintenanceDaemonControl->trancheId);
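For context, a minimal sketch of the PostgreSQL 10+ named-tranche pattern that both the maintenance daemon and the task tracker now use unconditionally (the struct and identifiers below are illustrative, not the actual Citus symbols):

#include "postgres.h"
#include "storage/lwlock.h"

typedef struct ExampleSharedState
{
	int trancheId;
	char *lockTrancheName;
	LWLock lock;
} ExampleSharedState;

static void
ExampleShmemInit(ExampleSharedState *state)
{
	/* allocate a tranche id once, give it a stable name, and register it */
	state->trancheId = LWLockNewTrancheId();
	state->lockTrancheName = "Example Tranche";
	LWLockRegisterTranche(state->trancheId, state->lockTrancheName);

	/* tie the lock to the tranche; the tranche name is what lock waits report */
	LWLockInitialize(&state->lock, state->trancheId);
}

The 9.6 code path removed above had to zero the shared struct and fill an LWLockTranche (array_base, array_stride, name) by hand; with 9.6 gone, only the id-plus-name registration remains.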


@ -10,9 +10,7 @@
#include "access/heapam.h"
#include "access/htup_details.h"
#include "catalog/indexing.h"
#if (PG_VERSION_NUM >= 100000)
#include "catalog/partition.h"
#endif
#include "catalog/pg_class.h"
#include "catalog/pg_inherits.h"
#if (PG_VERSION_NUM < 110000)
@ -33,9 +31,7 @@
#include "utils/syscache.h"
#if (PG_VERSION_NUM >= 100000)
static char * PartitionBound(Oid partitionId);
#endif
/*
@ -47,12 +43,10 @@ PartitionedTable(Oid relationId)
Relation rel = heap_open(relationId, AccessShareLock);
bool partitionedTable = false;
#if (PG_VERSION_NUM >= 100000)
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
partitionedTable = true;
}
#endif
/* keep the lock */
heap_close(rel, NoLock);
@ -78,12 +72,10 @@ PartitionedTableNoLock(Oid relationId)
return false;
}
#if (PG_VERSION_NUM >= 100000)
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
partitionedTable = true;
}
#endif
/* keep the lock */
heap_close(rel, NoLock);
@ -101,9 +93,7 @@ PartitionTable(Oid relationId)
Relation rel = heap_open(relationId, AccessShareLock);
bool partitionTable = false;
#if (PG_VERSION_NUM >= 100000)
partitionTable = rel->rd_rel->relispartition;
#endif
/* keep the lock */
heap_close(rel, NoLock);
@ -129,9 +119,7 @@ PartitionTableNoLock(Oid relationId)
return false;
}
#if (PG_VERSION_NUM >= 100000)
partitionTable = rel->rd_rel->relispartition;
#endif
/* keep the lock */
heap_close(rel, NoLock);
@ -237,9 +225,7 @@ PartitionParentOid(Oid partitionOid)
{
Oid partitionParentOid = InvalidOid;
#if (PG_VERSION_NUM >= 100000)
partitionParentOid = get_partition_parent(partitionOid);
#endif
return partitionParentOid;
}
@ -255,7 +241,6 @@ PartitionList(Oid parentRelationId)
Relation rel = heap_open(parentRelationId, AccessShareLock);
List *partitionList = NIL;
#if (PG_VERSION_NUM >= 100000)
int partitionIndex = 0;
int partitionCount = 0;
@ -274,7 +259,6 @@ PartitionList(Oid parentRelationId)
partitionList =
lappend_oid(partitionList, rel->rd_partdesc->oids[partitionIndex]);
}
#endif
/* keep the lock */
heap_close(rel, NoLock);
@ -291,8 +275,6 @@ char *
GenerateDetachPartitionCommand(Oid partitionTableId)
{
StringInfo detachPartitionCommand = makeStringInfo();
#if (PG_VERSION_NUM >= 100000)
Oid parentId = InvalidOid;
char *tableQualifiedName = NULL;
char *parentTableQualifiedName = NULL;
@ -311,7 +293,6 @@ GenerateDetachPartitionCommand(Oid partitionTableId)
appendStringInfo(detachPartitionCommand,
"ALTER TABLE IF EXISTS %s DETACH PARTITION %s;",
parentTableQualifiedName, tableQualifiedName);
#endif
return detachPartitionCommand->data;
}
@ -325,8 +306,6 @@ char *
GeneratePartitioningInformation(Oid parentTableId)
{
char *partitionBoundCString = "";
#if (PG_VERSION_NUM >= 100000)
Datum partitionBoundDatum = 0;
if (!PartitionedTable(parentTableId))
@ -340,7 +319,6 @@ GeneratePartitioningInformation(Oid parentTableId)
ObjectIdGetDatum(parentTableId));
partitionBoundCString = TextDatumGetCString(partitionBoundDatum);
#endif
return partitionBoundCString;
}
@ -398,8 +376,6 @@ char *
GenerateAlterTableAttachPartitionCommand(Oid partitionTableId)
{
StringInfo createPartitionCommand = makeStringInfo();
#if (PG_VERSION_NUM >= 100000)
char *partitionBoundCString = NULL;
Oid parentId = InvalidOid;
@ -422,14 +398,11 @@ GenerateAlterTableAttachPartitionCommand(Oid partitionTableId)
appendStringInfo(createPartitionCommand, "ALTER TABLE %s ATTACH PARTITION %s %s;",
parentTableQualifiedName, tableQualifiedName,
partitionBoundCString);
#endif
return createPartitionCommand->data;
}
#if (PG_VERSION_NUM >= 100000)
/*
* This function is heavily inspired by RelationBuildPartitionDesc(),
* which is available in src/backend/catalog/partition.c.
@ -479,6 +452,3 @@ PartitionBound(Oid partitionId)
return partitionBoundString;
}
#endif


@ -38,9 +38,7 @@
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#if (PG_VERSION_NUM >= 100000)
#include "utils/varlena.h"
#endif
/* static definition and declarations */

File diff suppressed because it is too large


@ -13,10 +13,7 @@
#include "citus_version.h"
#include "fmgr.h"
#include "utils/uuid.h"
#if PG_VERSION_NUM >= 100000
#include "utils/backend_random.h"
#endif
bool EnableStatisticsCollection = true; /* send basic usage statistics to Citus */
@ -48,10 +45,7 @@ typedef struct utsname
#include "utils/builtins.h"
#include "utils/json.h"
#include "utils/jsonb.h"
#if PG_VERSION_NUM >= 100000
#include "utils/fmgrprotos.h"
#endif
static size_t StatisticsCallback(char *contents, size_t size, size_t count,
void *userData);
@ -605,15 +599,12 @@ citus_server_id(PG_FUNCTION_ARGS)
{
uint8 *buf = (uint8 *) palloc(UUID_LEN);
#if PG_VERSION_NUM >= 100000
/*
* If pg_backend_random() fails, fall-back to using random(). In previous
* versions of postgres we don't have pg_backend_random(), so use it by
* default in that case.
*/
if (!pg_backend_random((char *) buf, UUID_LEN))
#endif
{
int bufIdx = 0;
for (bufIdx = 0; bufIdx < UUID_LEN; bufIdx++)


@ -588,22 +588,10 @@ TaskTrackerShmemInit(void)
if (!alreadyInitialized)
{
#if (PG_VERSION_NUM >= 100000)
WorkerTasksSharedState->taskHashTrancheId = LWLockNewTrancheId();
WorkerTasksSharedState->taskHashTrancheName = "Worker Task Hash Tranche";
LWLockRegisterTranche(WorkerTasksSharedState->taskHashTrancheId,
WorkerTasksSharedState->taskHashTrancheName);
#else
/* initialize lwlock protecting the task tracker hash table */
LWLockTranche *tranche = &WorkerTasksSharedState->taskHashLockTranche;
WorkerTasksSharedState->taskHashTrancheId = LWLockNewTrancheId();
tranche->array_base = &WorkerTasksSharedState->taskHashLock;
tranche->array_stride = sizeof(LWLock);
tranche->name = "Worker Task Hash Tranche";
LWLockRegisterTranche(WorkerTasksSharedState->taskHashTrancheId, tranche);
#endif
LWLockInitialize(&WorkerTasksSharedState->taskHashLock,
WorkerTasksSharedState->taskHashTrancheId);


@ -347,13 +347,8 @@ CreateJobSchema(StringInfo schemaName)
createSchemaStmt->schemaElts = NIL;
/* actually create schema with the current user as owner */
#if (PG_VERSION_NUM >= 100000)
createSchemaStmt->authrole = &currentUserRole;
CreateSchemaCommand(createSchemaStmt, queryString, -1, -1);
#else
createSchemaStmt->authrole = (Node *) &currentUserRole;
CreateSchemaCommand(createSchemaStmt, queryString);
#endif
CommandCounterIncrement();


@ -48,10 +48,8 @@
#include "tcop/utility.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#if (PG_VERSION_NUM >= 100000)
#include "utils/regproc.h"
#include "utils/varlena.h"
#endif
/* Local functions forward declarations */
@ -660,9 +658,7 @@ ParseTreeNode(const char *ddlCommand)
{
Node *parseTreeNode = ParseTreeRawStmt(ddlCommand);
#if (PG_VERSION_NUM >= 100000)
parseTreeNode = ((RawStmt *) parseTreeNode)->stmt;
#endif
return parseTreeNode;
}
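As background for the RawStmt cast kept above: since PostgreSQL 10 the raw parser wraps every statement in a RawStmt node, so callers unwrap ->stmt to reach the actual statement. A minimal sketch (assuming the PostgreSQL 10/11 parser API; the helper name is hypothetical):

#include "postgres.h"
#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"
#include "parser/parser.h"

/* Parse a single DDL string and strip the RawStmt wrapper added in PG 10. */
static Node *
ParseSingleStatement(const char *ddlCommand)
{
	List *parseTreeList = raw_parser(ddlCommand);
	RawStmt *rawStmt = (RawStmt *) linitial(parseTreeList);

	return rawStmt->stmt;
}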
@ -874,13 +870,8 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName)
Form_pg_sequence sequenceData = pg_get_sequencedef(sequenceId);
int64 startValue = 0;
int64 maxValue = 0;
#if (PG_VERSION_NUM >= 100000)
int64 sequenceMaxValue = sequenceData->seqmax;
int64 sequenceMinValue = sequenceData->seqmin;
#else
int64 sequenceMaxValue = sequenceData->max_value;
int64 sequenceMinValue = sequenceData->min_value;
#endif
/* calculate min/max values that the sequence can generate in this worker */
@ -951,11 +942,7 @@ SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg)
}
}
#if (PG_VERSION_NUM >= 100000)
defElem = makeDefElem((char *) name, arg, -1);
#else
defElem = makeDefElem((char *) name, arg);
#endif
statement->options = lappend(statement->options, defElem);
}
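For reference, a small sketch of the three-argument makeDefElem() that PostgreSQL 10 introduced and that the code above now relies on; the third argument is the parse location, with -1 meaning "unknown" (the helper and option below are illustrative only, not Citus code):

#include "postgres.h"
#include "nodes/makefuncs.h"
#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"
#include "nodes/value.h"

/* Append a "start" option to an ALTER SEQUENCE statement (PG 10+ node API). */
static void
AddStartValueOption(AlterSeqStmt *statement, long startValue)
{
	/* -1 is the conventional "location not known" value */
	DefElem *startElem = makeDefElem("start", (Node *) makeInteger(startValue), -1);

	statement->options = lappend(statement->options, startElem);
}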


@ -375,15 +375,11 @@ RemoveJobSchema(StringInfo schemaName)
* can suppress notice messages that are typically displayed during
* cascading deletes.
*/
#if (PG_VERSION_NUM >= 100000)
performDeletion(&schemaObject, DROP_CASCADE,
PERFORM_DELETION_INTERNAL |
PERFORM_DELETION_QUIETLY |
PERFORM_DELETION_SKIP_ORIGINAL |
PERFORM_DELETION_SKIP_EXTENSIONS);
#else
deleteWhatDependsOn(&schemaObject, false);
#endif
CommandCounterIncrement();
@ -423,12 +419,8 @@ CreateTaskTable(StringInfo schemaName, StringInfo relationName,
createStatement = CreateStatement(relation, columnDefinitionList);
#if (PG_VERSION_NUM >= 100000)
relationObject = DefineRelation(createStatement, RELKIND_RELATION, InvalidOid, NULL,
NULL);
#else
relationObject = DefineRelation(createStatement, RELKIND_RELATION, InvalidOid, NULL);
#endif
relationId = relationObject.objectId;
Assert(relationId != InvalidOid);
@ -572,16 +564,11 @@ CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName,
copyStatement = CopyStatement(relation, fullFilename->data);
if (BinaryWorkerCopyFormat)
{
#if (PG_VERSION_NUM >= 100000)
DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary"),
-1);
#else
DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary"));
#endif
copyStatement->options = list_make1(copyOption);
}
#if (PG_VERSION_NUM >= 100000)
{
ParseState *pstate = make_parsestate(NULL);
pstate->p_sourcetext = queryString;
@ -590,9 +577,7 @@ CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName,
free_parsestate(pstate);
}
#else
DoCopy(copyStatement, queryString, &copiedRowCount);
#endif
copiedRowTotal += copiedRowCount;
CommandCounterIncrement();
}


@ -853,12 +853,8 @@ FileOutputStreamFlush(FileOutputStream file)
int written = 0;
errno = 0;
#if (PG_VERSION_NUM >= 100000)
written = FileWrite(file.fileDescriptor, fileBuffer->data, fileBuffer->len,
PG_WAIT_IO);
#else
written = FileWrite(file.fileDescriptor, fileBuffer->data, fileBuffer->len);
#endif
if (written != fileBuffer->len)
{
ereport(ERROR, (errcode_for_file_access(),


@ -251,11 +251,7 @@ TaskFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)
static void
WriteToLocalFile(StringInfo copyData, File fileDesc)
{
#if (PG_VERSION_NUM >= 100000)
int bytesWritten = FileWrite(fileDesc, copyData->data, copyData->len, PG_WAIT_IO);
#else
int bytesWritten = FileWrite(fileDesc, copyData->data, copyData->len);
#endif
if (bytesWritten < 0)
{
ereport(ERROR, (errcode_for_file_access(),


@ -39,9 +39,6 @@ extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
ProcessUtilityContext context, ParamListInfo params,
struct QueryEnvironment *queryEnv, DestReceiver *dest,
char *completionTag);
extern void multi_ProcessUtility9x(Node *parsetree, const char *queryString,
ProcessUtilityContext context, ParamListInfo params,
DestReceiver *dest, char *completionTag);
extern void CitusProcessUtility(Node *node, const char *queryString,
ProcessUtilityContext context, ParamListInfo params,
DestReceiver *dest, char *completionTag);


@ -17,13 +17,6 @@
#include "nodes/pg_list.h"
#if (PG_VERSION_NUM < 100000)
/* define symbols that are undefined in PostgreSQL <= 9.6 */
#define DSM_HANDLE_INVALID 0
extern Datum pg_stat_get_progress_info(PG_FUNCTION_ARGS);
#endif
typedef struct ProgressMonitorData
{
uint64 processId;


@ -105,8 +105,6 @@ typedef struct WorkerTasksSharedStateData
int taskHashTrancheId;
#if (PG_VERSION_NUM >= 100000)
char *taskHashTrancheName;
#else
LWLockTranche taskHashLockTranche;
#endif
LWLock taskHashLock;
bool conninfosValid;


@ -16,19 +16,7 @@
#include "catalog/namespace.h"
#include "nodes/parsenodes.h"
#if (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 90700)
/* Backports from PostgreSQL 10 */
/* Accessor for the i'th attribute of tupdesc. */
#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)])
#endif
#if (PG_VERSION_NUM < 100000)
struct QueryEnvironment; /* forward-declare to appease compiler */
#endif
#if (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 110000)
#if (PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000)
#include "access/hash.h"
#include "storage/fd.h"


@ -1,6 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count
setup failed: ERROR: syntax error at or near "PARTITION"
LINE 3: ...itioned_copy(id integer, data text, int_data int) PARTITION ...
^


@ -1,35 +0,0 @@
--
-- MULTI_CREATE_TABLE_NEW_FEATURES
--
-- print major version to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+') AS major_version;
major_version
---------------
9
(1 row)
-- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10
-- is forbidden in distributed tables.
CREATE TABLE table_identity_col (
id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
payload text );
ERROR: syntax error at or near "GENERATED"
LINE 2: id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
^
SELECT master_create_distributed_table('table_identity_col', 'id', 'append');
ERROR: relation "table_identity_col" does not exist
LINE 1: SELECT master_create_distributed_table('table_identity_col',...
^
SELECT create_distributed_table('table_identity_col', 'id');
ERROR: relation "table_identity_col" does not exist
LINE 1: SELECT create_distributed_table('table_identity_col', 'id');
^
SELECT create_distributed_table('table_identity_col', 'text');
ERROR: relation "table_identity_col" does not exist
LINE 1: SELECT create_distributed_table('table_identity_col', 'text'...
^
SELECT create_reference_table('table_identity_col');
ERROR: relation "table_identity_col" does not exist
LINE 1: SELECT create_reference_table('table_identity_col');
^

File diff suppressed because it is too large


@ -1,304 +0,0 @@
--
-- MULTI_INDEX_STATEMENTS
--
-- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
-- tables.
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int AS major_version;
major_version
---------------
9
(1 row)
--
-- CREATE TEST TABLES
--
SET citus.next_shard_id TO 102080;
CREATE TABLE index_test_range(a int, b int, c int);
SELECT create_distributed_table('index_test_range', 'a', 'range');
create_distributed_table
--------------------------
(1 row)
SELECT master_create_empty_shard('index_test_range');
master_create_empty_shard
---------------------------
102080
(1 row)
SELECT master_create_empty_shard('index_test_range');
master_create_empty_shard
---------------------------
102081
(1 row)
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 2;
CREATE TABLE index_test_hash(a int, b int, c int);
SELECT create_distributed_table('index_test_hash', 'a', 'hash');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE index_test_append(a int, b int, c int);
SELECT create_distributed_table('index_test_append', 'a', 'append');
create_distributed_table
--------------------------
(1 row)
SELECT master_create_empty_shard('index_test_append');
master_create_empty_shard
---------------------------
102090
(1 row)
SELECT master_create_empty_shard('index_test_append');
master_create_empty_shard
---------------------------
102091
(1 row)
--
-- CREATE INDEX
--
-- Verify that we can create different types of indexes
CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey);
CREATE INDEX lineitem_partkey_desc_index ON lineitem (l_partkey DESC);
CREATE INDEX lineitem_partial_index ON lineitem (l_shipdate)
WHERE l_shipdate < '1995-01-01';
CREATE INDEX lineitem_colref_index ON lineitem (record_ne(lineitem.*, NULL));
SET client_min_messages = ERROR; -- avoid version dependent warning about WAL
CREATE INDEX lineitem_orderkey_hash_index ON lineitem USING hash (l_partkey);
CREATE UNIQUE INDEX index_test_range_index_a ON index_test_range(a);
CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range(a,b);
CREATE UNIQUE INDEX index_test_hash_index_a ON index_test_hash(a);
CREATE UNIQUE INDEX index_test_hash_index_a_b ON index_test_hash(a,b);
CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON index_test_hash(a,b) WHERE c IS NOT NULL;
CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range(a,b) WHERE c IS NOT NULL;
CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON index_test_hash(a) INCLUDE (b,c);
ERROR: syntax error at or near "INCLUDE"
LINE 1: ...index_test_hash_index_a_b_c ON index_test_hash(a) INCLUDE (b...
^
RESET client_min_messages;
-- Verify that we handle if not exists statements correctly
CREATE INDEX lineitem_orderkey_index on lineitem(l_orderkey);
ERROR: relation "lineitem_orderkey_index" already exists
CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on lineitem(l_orderkey);
NOTICE: relation "lineitem_orderkey_index" already exists, skipping
CREATE INDEX IF NOT EXISTS lineitem_orderkey_index_new on lineitem(l_orderkey);
-- Verify if not exists behavior with an index with same name on a different table
CREATE INDEX lineitem_orderkey_index on index_test_hash(a);
ERROR: relation "lineitem_orderkey_index" already exists
CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on index_test_hash(a);
NOTICE: relation "lineitem_orderkey_index" already exists, skipping
-- Verify that we can create indexes concurrently
CREATE INDEX CONCURRENTLY lineitem_concurrently_index ON lineitem (l_orderkey);
-- Verify that we warn out on CLUSTER command for distributed tables and no parameter
CLUSTER index_test_hash USING index_test_hash_index_a;
WARNING: not propagating CLUSTER command to worker nodes
CLUSTER;
WARNING: not propagating CLUSTER command to worker nodes
-- Verify that no-name local CREATE INDEX CONCURRENTLY works
CREATE TABLE local_table (id integer, name text);
CREATE INDEX CONCURRENTLY local_table_index ON local_table(id);
-- Verify we don't warn out on CLUSTER command for local tables
CLUSTER local_table USING local_table_index;
DROP TABLE local_table;
-- Verify that all indexes got created on the master node and one of the workers
SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname;
schemaname | tablename | indexname | tablespace | indexdef
------------+------------------+------------------------------------+------------+----------------------------------------------------------------------------------------------------------------------------
public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a)
public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b)
public | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON public.index_test_hash USING btree (a, b) WHERE (c IS NOT NULL)
public | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON public.index_test_range USING btree (a)
public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON public.index_test_range USING btree (a, b)
public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON public.index_test_range USING btree (a, b) WHERE (c IS NOT NULL)
public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON public.lineitem USING btree (record_ne(lineitem.*, NULL::record))
public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey)
public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON public.lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date)
public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC)
public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber)
public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate)
(15 rows)
\c - - - :worker_1_port
SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1);
count
-------
9
(1 row)
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
count
-------
24
(1 row)
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%';
count
-------
6
(1 row)
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
count
-------
0
(1 row)
\c - - - :master_port
-- Verify that we error out on unsupported statement types
CREATE UNIQUE INDEX try_index ON lineitem (l_orderkey);
ERROR: creating unique indexes on append-partitioned tables is currently unsupported
CREATE INDEX try_index ON lineitem (l_orderkey) TABLESPACE newtablespace;
ERROR: specifying tablespaces with CREATE INDEX statements is currently unsupported
CREATE UNIQUE INDEX try_unique_range_index ON index_test_range(b);
ERROR: creating unique indexes on non-partition columns is currently unsupported
CREATE UNIQUE INDEX try_unique_range_index_partial ON index_test_range(b) WHERE c IS NOT NULL;
ERROR: creating unique indexes on non-partition columns is currently unsupported
CREATE UNIQUE INDEX try_unique_hash_index ON index_test_hash(b);
ERROR: creating unique indexes on non-partition columns is currently unsupported
CREATE UNIQUE INDEX try_unique_hash_index_partial ON index_test_hash(b) WHERE c IS NOT NULL;
ERROR: creating unique indexes on non-partition columns is currently unsupported
CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(b);
ERROR: creating unique indexes on append-partitioned tables is currently unsupported
CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(a);
ERROR: creating unique indexes on append-partitioned tables is currently unsupported
CREATE UNIQUE INDEX try_unique_append_index_a_b ON index_test_append(a,b);
ERROR: creating unique indexes on append-partitioned tables is currently unsupported
-- Verify that we error out in case of postgres errors on supported statement
-- types.
CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey);
ERROR: relation "lineitem_orderkey_index" already exists
CREATE INDEX try_index ON lineitem USING gist (l_orderkey);
ERROR: data type bigint has no default operator class for access method "gist"
HINT: You must specify an operator class for the index or define a default operator class for the data type.
CREATE INDEX try_index ON lineitem (non_existent_column);
ERROR: column "non_existent_column" does not exist
CREATE INDEX ON lineitem (l_orderkey);
ERROR: creating index without a name on a distributed table is currently unsupported
-- Verify that none of failed indexes got created on the master node
SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname;
schemaname | tablename | indexname | tablespace | indexdef
------------+------------------+------------------------------------+------------+----------------------------------------------------------------------------------------------------------------------------
public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a)
public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b)
public | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON public.index_test_hash USING btree (a, b) WHERE (c IS NOT NULL)
public | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON public.index_test_range USING btree (a)
public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON public.index_test_range USING btree (a, b)
public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON public.index_test_range USING btree (a, b) WHERE (c IS NOT NULL)
public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON public.lineitem USING btree (record_ne(lineitem.*, NULL::record))
public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey)
public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON public.lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date)
public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC)
public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber)
public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate)
(15 rows)
--
-- DROP INDEX
--
-- Verify that we can't drop multiple indexes in a single command
DROP INDEX lineitem_orderkey_index, lineitem_partial_index;
ERROR: cannot drop multiple distributed objects in a single command
HINT: Try dropping each object in a separate DROP command.
-- Verify that we can successfully drop indexes
DROP INDEX lineitem_orderkey_index;
DROP INDEX lineitem_orderkey_index_new;
DROP INDEX lineitem_partkey_desc_index;
DROP INDEX lineitem_partial_index;
DROP INDEX lineitem_colref_index;
-- Verify that we handle if exists statements correctly
DROP INDEX non_existent_index;
ERROR: index "non_existent_index" does not exist
DROP INDEX IF EXISTS non_existent_index;
NOTICE: index "non_existent_index" does not exist, skipping
DROP INDEX IF EXISTS lineitem_orderkey_hash_index;
DROP INDEX lineitem_orderkey_hash_index;
ERROR: index "lineitem_orderkey_hash_index" does not exist
DROP INDEX index_test_range_index_a;
DROP INDEX index_test_range_index_a_b;
DROP INDEX index_test_range_index_a_b_partial;
DROP INDEX index_test_hash_index_a;
DROP INDEX index_test_hash_index_a_b;
DROP INDEX index_test_hash_index_a_b_partial;
-- Verify that we can drop indexes concurrently
DROP INDEX CONCURRENTLY lineitem_concurrently_index;
-- Verify that all the indexes are dropped from the master and one worker node.
-- As there's a primary key, exclude those from this check.
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%';
indrelid | indexrelid
----------+------------
(0 rows)
SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
schemaname | tablename | indexname | tablespace | indexdef
------------+-----------+-----------+------------+----------
(0 rows)
\c - - - :worker_1_port
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%';
indrelid | indexrelid
----------+------------
(0 rows)
SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
schemaname | tablename | indexname | tablespace | indexdef
------------+-----------+-----------+------------+----------
(0 rows)
-- create index that will conflict with master operations
CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b);
\c - - - :master_port
-- should fail because worker index already exists
CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);
ERROR: CONCURRENTLY-enabled index command failed
DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
-- the failure results in an INVALID index
SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
Index Valid?
--------------
f
(1 row)
-- we can clean it up and recreate with an DROP IF EXISTS
DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx;
CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);
SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
Index Valid?
--------------
t
(1 row)
\c - - - :worker_1_port
-- now drop shard index to test partial master DROP failure
DROP INDEX CONCURRENTLY ith_b_idx_102089;
\c - - - :master_port
DROP INDEX CONCURRENTLY ith_b_idx;
ERROR: CONCURRENTLY-enabled index command failed
DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
-- the failure results in an INVALID index
SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
Index Valid?
--------------
f
(1 row)
-- final clean up
DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx;
-- Drop created tables
DROP TABLE index_test_range;
DROP TABLE index_test_hash;
DROP TABLE index_test_append;


@ -1,303 +0,0 @@
--
-- MULTI_NULL_MINMAX_VALUE_PRUNING
--
-- This test checks that we can handle null min/max values in shard statistics
-- and that we don't partition or join prune shards that have null values.
SET citus.next_shard_id TO 760000;
-- print major version number for version-specific tests
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int AS server_version;
server_version
----------------
9
(1 row)
SET client_min_messages TO DEBUG2;
SET citus.explain_all_tasks TO on;
-- to avoid differing explain output - executor doesn't matter,
-- because we're testing pruning here.
SET citus.task_executor_type TO 'real-time';
-- Change configuration to treat lineitem and orders tables as large
SET citus.log_multi_join_order to true;
SET citus.enable_repartition_joins to ON;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
shardminvalue | shardmaxvalue
---------------+---------------
1 | 5986
(1 row)
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
shardminvalue | shardmaxvalue
---------------+---------------
8997 | 14947
(1 row)
-- Check that partition and join pruning works when min/max values exist
-- Adding l_orderkey = 1 to make the query not router executable
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
LOG: join order: [ "lineitem" ]
QUERY PLAN
-----------------------------------------------------------------------
Custom Scan (Citus Real-Time)
Task Count: 2
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Bitmap Heap Scan on lineitem_290001 lineitem
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
-> BitmapOr
-> Bitmap Index Scan on lineitem_pkey_290001
Index Cond: (l_orderkey = 9030)
-> Bitmap Index Scan on lineitem_pkey_290001
Index Cond: (l_orderkey = 1)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Bitmap Heap Scan on lineitem_290000 lineitem
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
-> BitmapOr
-> Bitmap Index Scan on lineitem_pkey_290000
Index Cond: (l_orderkey = 9030)
-> Bitmap Index Scan on lineitem_pkey_290000
Index Cond: (l_orderkey = 1)
(21 rows)
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey;
LOG: join order: [ "lineitem" ][ local partition join "orders" ]
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
QUERY PLAN
------------------------------------------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Real-Time)
Task Count: 2
Tasks Shown: All
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290002 on orders_290002 orders
-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Merge Join
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Index Only Scan using orders_pkey_290003 on orders_290003 orders
-> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
(18 rows)
-- Now set the minimum value for a shard to null. Then check that we don't apply
-- partition or join pruning for the shard with null min value. Since it is not
-- supported with single-repartition join, dual-repartition has been used.
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
LOG: join order: [ "lineitem" ]
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Real-Time)
Task Count: 2
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
Index Cond: (l_orderkey = 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 9030)
(11 rows)
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
QUERY PLAN
-------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
(10 rows)
-- Next, set the maximum value for another shard to null. Then check that we
-- don't apply partition or join pruning for this other shard either. Since it
-- is not supported with single-repartition join, dual-repartition has been used.
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
LOG: join order: [ "lineitem" ]
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Real-Time)
Task Count: 2
Tasks Shown: All
-> Task
Node: host=localhost port=57638 dbname=regression
-> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
Index Cond: (l_orderkey = 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
Index Cond: (l_orderkey = 9030)
(11 rows)
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
QUERY PLAN
-------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
(10 rows)
-- Last, set the minimum value to 0 and check that we don't treat it as null. We
-- should apply partition and join pruning for this shard now. Since it is not
-- supported with single-repartition join, dual-repartition has been used.
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
LOG: join order: [ "lineitem" ]
DEBUG: Plan is router executable
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (Citus Router)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
Index Cond: (l_orderkey = 9030)
(7 rows)
EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_partkey = o_custkey;
LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
QUERY PLAN
-------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Task-Tracker)
Task Count: 4
Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
-> MapMergeJob
Map Task Count: 2
Merge Task Count: 4
(10 rows)
-- Set minimum and maximum values for two shards back to their original values
UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000;
UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001;
SET client_min_messages TO NOTICE;

File diff suppressed because it is too large


@ -1,372 +0,0 @@
-- This test has different output per major version
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int as server_major_version;
server_major_version
----------------------
9
(1 row)
-- ===================================================================
-- create test functions
-- ===================================================================
CREATE FUNCTION generate_alter_table_detach_partition_command(regclass)
RETURNS text
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION generate_alter_table_attach_partition_command(regclass)
RETURNS text
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION generate_partition_information(regclass)
RETURNS text
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION print_partitions(regclass)
RETURNS text
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION table_inherits(regclass)
RETURNS bool
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION table_inherited(regclass)
RETURNS bool
AS 'citus'
LANGUAGE C STRICT;
CREATE OR REPLACE FUNCTION detach_and_attach_partition(partition_name regclass, parent_table_name regclass)
RETURNS void LANGUAGE plpgsql VOLATILE
AS $function$
DECLARE
detach_partition_command text := '';
attach_partition_command text := '';
command_result text := '';
BEGIN
-- first generate the command
SELECT public.generate_alter_table_attach_partition_command(partition_name) INTO attach_partition_command;
-- now generate the detach command
SELECT public.generate_alter_table_detach_partition_command(partition_name) INTO detach_partition_command;
-- later detach the same partition
EXECUTE detach_partition_command;
-- now attach it again
EXECUTE attach_partition_command;
END;
$function$;
CREATE OR REPLACE FUNCTION drop_and_recreate_partitioned_table(parent_table_name regclass)
RETURNS void LANGUAGE plpgsql VOLATILE
AS $function$
DECLARE
command text := '';
BEGIN
-- first generate the command
CREATE TABLE partitioned_table_create_commands AS SELECT master_get_table_ddl_events(parent_table_name::text);
-- later detach the same partition
EXECUTE 'DROP TABLE ' || parent_table_name::text || ';';
FOR command IN SELECT * FROM partitioned_table_create_commands
LOOP
-- can do some processing here
EXECUTE command;
END LOOP;
DROP TABLE partitioned_table_create_commands;
END;
$function$;
-- create a partitioned table
CREATE TABLE date_partitioned_table(id int, time date) PARTITION BY RANGE (time);
ERROR: syntax error at or near "PARTITION"
LINE 1: ...E TABLE date_partitioned_table(id int, time date) PARTITION ...
^
-- we should be able to get the partitioning information even if there are no partitions
SELECT generate_partition_information('date_partitioned_table');
ERROR: relation "date_partitioned_table" does not exist
LINE 1: SELECT generate_partition_information('date_partitioned_tabl...
^
-- we should be able to drop and re-create the partitioned table using the command that Citus generates
SELECT drop_and_recreate_partitioned_table('date_partitioned_table');
ERROR: relation "date_partitioned_table" does not exist
LINE 1: SELECT drop_and_recreate_partitioned_table('date_partitioned...
^
-- we should also be able to see the PARTITION BY ... for the parent table
SELECT master_get_table_ddl_events('date_partitioned_table');
ERROR: relation "date_partitioned_table" does not exist
-- now create the partitions
CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES FROM ('2006-01-01') TO ('2007-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE date_partition_2006 PARTITION OF date_partition...
^
CREATE TABLE date_partition_2007 PARTITION OF date_partitioned_table FOR VALUES FROM ('2007-01-01') TO ('2008-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE date_partition_2007 PARTITION OF date_partition...
^
-- we should be able to get the partitioning information after the partitions are created
SELECT generate_partition_information('date_partitioned_table');
ERROR: relation "date_partitioned_table" does not exist
LINE 1: SELECT generate_partition_information('date_partitioned_tabl...
^
-- let's get the attach partition commands
SELECT generate_alter_table_attach_partition_command('date_partition_2006');
ERROR: relation "date_partition_2006" does not exist
LINE 1: ...ECT generate_alter_table_attach_partition_command('date_part...
^
SELECT generate_alter_table_attach_partition_command('date_partition_2007');
ERROR: relation "date_partition_2007" does not exist
LINE 1: ...ECT generate_alter_table_attach_partition_command('date_part...
^
-- detach and attach the partition by the command generated by us
\d+ date_partitioned_table
SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_table');
ERROR: relation "date_partition_2007" does not exist
LINE 1: SELECT detach_and_attach_partition('date_partition_2007', 'd...
^
-- check that both partitions are visible
\d+ date_partitioned_table
-- make sure that inter shard commands work as expected
-- assume that the shardId is 100
CREATE TABLE date_partitioned_table_100 (id int, time date) PARTITION BY RANGE (time);
ERROR: syntax error at or near "PARTITION"
LINE 1: ...LE date_partitioned_table_100 (id int, time date) PARTITION ...
^
CREATE TABLE date_partition_2007_100 (id int, time date );
-- now create the partitioning hierarchy
SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
referenced_shard:=100, referenced_schema_name:='public',
command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' );
ERROR: syntax error at or near "ATTACH"
LINE 1: SELECT worker_apply_inter_shard_ddl_command(referencing_shar...
^
-- the hierarchy is successfully created
\d+ date_partitioned_table_100
-- Citus can also get the DDL events for the partitions as regular tables
SELECT master_get_table_ddl_events('date_partition_2007_100');
master_get_table_ddl_events
-----------------------------------------------------------------------
CREATE TABLE public.date_partition_2007_100 (id integer, "time" date)
ALTER TABLE public.date_partition_2007_100 OWNER TO postgres
(2 rows)
-- now break the partitioning hierarchy
SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
referenced_shard:=100, referenced_schema_name:='public',
command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' );
ERROR: syntax error at or near "DETACH"
LINE 1: SELECT worker_apply_inter_shard_ddl_command(referencing_shar...
^
-- the hierarchy is successfully broken
\d+ date_partitioned_table_100
-- now let's have some more complex partitioning hierarchies with
-- tables on different schemas and constraints on the tables
CREATE SCHEMA partition_parent_schema;
CREATE TABLE partition_parent_schema.parent_table (id int NOT NULL, time date DEFAULT now()) PARTITION BY RANGE (time);
ERROR: syntax error at or near "PARTITION"
LINE 1: ..._table (id int NOT NULL, time date DEFAULT now()) PARTITION ...
^
CREATE SCHEMA partition_child_1_schema;
CREATE TABLE partition_child_1_schema.child_1 (id int NOT NULL, time date );
CREATE SCHEMA partition_child_2_schema;
CREATE TABLE partition_child_2_schema.child_2 (id int NOT NULL, time date );
-- we should be able to get the partitioning information even if there are no partitions
SELECT generate_partition_information('partition_parent_schema.parent_table');
ERROR: relation "partition_parent_schema.parent_table" does not exist
LINE 1: SELECT generate_partition_information('partition_parent_sche...
^
-- we should be able to drop and re-create the partitioned table using the command that Citus generates
SELECT drop_and_recreate_partitioned_table('partition_parent_schema.parent_table');
ERROR: relation "partition_parent_schema.parent_table" does not exist
LINE 1: SELECT drop_and_recreate_partitioned_table('partition_parent...
^
ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('2009-01-01') TO ('2010-01-02');
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE partition_parent_schema.parent_table ATTACH PART...
^
SET search_path = 'partition_parent_schema';
ALTER TABLE parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('2006-01-01') TO ('2007-01-01');
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE parent_table ATTACH PARTITION partition_child_2...
^
SELECT public.generate_partition_information('parent_table');
ERROR: relation "parent_table" does not exist
LINE 1: SELECT public.generate_partition_information('parent_table')...
^
-- let's get the attach partition commands
SELECT public.generate_alter_table_attach_partition_command('partition_child_1_schema.child_1');
generate_alter_table_attach_partition_command
-----------------------------------------------
(1 row)
SET search_path = 'partition_child_2_schema';
SELECT public.generate_alter_table_attach_partition_command('child_2');
generate_alter_table_attach_partition_command
-----------------------------------------------
(1 row)
SET search_path = 'partition_parent_schema';
-- detach and attach the partition by the command generated by us
\d+ parent_table
SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'parent_table');
ERROR: relation "parent_table" does not exist
LINE 1: ...ach_partition('partition_child_1_schema.child_1', 'parent_ta...
^
-- check that both partitions are visible
\d+ parent_table
-- some very simple checks that should error out
SELECT public.generate_alter_table_attach_partition_command('parent_table');
ERROR: relation "parent_table" does not exist
LINE 1: ...lic.generate_alter_table_attach_partition_command('parent_ta...
^
SELECT public.generate_partition_information('partition_child_1_schema.child_1');
generate_partition_information
--------------------------------
(1 row)
SELECT public.print_partitions('partition_child_1_schema.child_1');
print_partitions
------------------
(1 row)
-- now print the partitions
SELECT public.print_partitions('parent_table');
ERROR: relation "parent_table" does not exist
LINE 1: SELECT public.print_partitions('parent_table');
^
SET search_path = 'public';
-- test multi column / expression partitioning with UNBOUNDED ranges
CREATE OR REPLACE FUNCTION some_function(input_val text)
RETURNS text LANGUAGE plpgsql IMMUTABLE
AS $function$
BEGIN
return reverse(input_val);
END;
$function$;
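-- Illustrative only, not part of the original test: some_function simply reverses
-- its argument, so a call like the following would return 'cba':
SELECT some_function('abc');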
CREATE TABLE multi_column_partitioned (
a int,
b int,
c text
) PARTITION BY RANGE (a, (a+b+1), some_function(upper(c)));
ERROR: syntax error at or near "PARTITION"
LINE 5: ) PARTITION BY RANGE (a, (a+b+1), some_function(upper(c)));
^
CREATE TABLE multi_column_partition_1(
a int,
b int,
c text
);
CREATE TABLE multi_column_partition_2(
a int,
b int,
c text
);
-- partitioning information
SELECT generate_partition_information('multi_column_partitioned');
ERROR: relation "multi_column_partitioned" does not exist
LINE 1: SELECT generate_partition_information('multi_column_partitio...
^
SELECT master_get_table_ddl_events('multi_column_partitioned');
ERROR: relation "multi_column_partitioned" does not exist
SELECT drop_and_recreate_partitioned_table('multi_column_partitioned');
ERROR: relation "multi_column_partitioned" does not exist
LINE 1: SELECT drop_and_recreate_partitioned_table('multi_column_par...
^
-- partitions and their ranges
ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250');
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_...
^
SELECT generate_alter_table_attach_partition_command('multi_column_partition_1');
generate_alter_table_attach_partition_command
-----------------------------------------------
(1 row)
ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE);
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_...
^
SELECT generate_alter_table_attach_partition_command('multi_column_partition_2');
generate_alter_table_attach_partition_command
-----------------------------------------------
(1 row)
SELECT generate_alter_table_detach_partition_command('multi_column_partition_2');
generate_alter_table_detach_partition_command
-----------------------------------------------
(1 row)
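-- Illustrative sketch, not part of the expected output: on a PostgreSQL 10+ build
-- the detach command generated for this partition would roughly be the standard form
ALTER TABLE multi_column_partitioned DETACH PARTITION multi_column_partition_2;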
-- finally a test with LIST partitioning
CREATE TABLE list_partitioned (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION BY LIST (col1) ;
ERROR: syntax error at or near "PARTITION"
LINE 1: ...ed (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION ...
^
SELECT generate_partition_information('list_partitioned');
ERROR: relation "list_partitioned" does not exist
LINE 1: SELECT generate_partition_information('list_partitioned');
^
SELECT master_get_table_ddl_events('list_partitioned');
ERROR: relation "list_partitioned" does not exist
SELECT drop_and_recreate_partitioned_table('list_partitioned');
ERROR: relation "list_partitioned" does not exist
LINE 1: SELECT drop_and_recreate_partitioned_table('list_partitioned...
^
CREATE TABLE list_partitioned_1 PARTITION OF list_partitioned FOR VALUES IN (100, 101, 102, 103, 104);
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE list_partitioned_1 PARTITION OF list_partitione...
^
SELECT generate_alter_table_attach_partition_command('list_partitioned_1');
ERROR: relation "list_partitioned_1" does not exist
LINE 1: ...ECT generate_alter_table_attach_partition_command('list_part...
^
-- also differentiate partitions and inherited tables
CREATE TABLE cities (
name text,
population float,
altitude int -- in feet
);
CREATE TABLE capitals (
state char(2)
) INHERITS (cities);
-- returns true since capitals inherits from cities
SELECT table_inherits('capitals');
table_inherits
----------------
t
(1 row)
-- although date_partition_2006 inherits from its parent
-- returns false since the hierarchy is formed via partitioning
SELECT table_inherits('date_partition_2006');
ERROR: relation "date_partition_2006" does not exist
LINE 1: SELECT table_inherits('date_partition_2006');
^
-- returns true since cities is inherited by capitals
SELECT table_inherited('cities');
table_inherited
-----------------
t
(1 row)
-- although date_partitioned_table is inherited by its partitions
-- returns false since the hierarchy is formed via partitioning
SELECT table_inherited('date_partitioned_table');
ERROR: relation "date_partitioned_table" does not exist
LINE 1: SELECT table_inherited('date_partitioned_table');
^
-- also these are not supported
SELECT master_get_table_ddl_events('capitals');
ERROR: capitals is not a regular, foreign or partitioned table
SELECT master_get_table_ddl_events('cities');
ERROR: cities is not a regular, foreign or partitioned table
-- dropping the parents also drops the partitions
DROP TABLE date_partitioned_table, multi_column_partitioned, list_partitioned, partition_parent_schema.parent_table, cities, capitals;
ERROR: table "date_partitioned_table" does not exist


@ -1,246 +0,0 @@
-- ===================================================================
-- test recursive planning functionality on partitioned tables
-- ===================================================================
CREATE SCHEMA subquery_and_partitioning;
SET search_path TO subquery_and_partitioning, public;
CREATE TABLE users_table_local AS SELECT * FROM users_table;
CREATE TABLE events_table_local AS SELECT * FROM events_table;
CREATE TABLE partitioning_test(id int, value_1 int, time date) PARTITION BY RANGE (time);
ERROR: syntax error at or near "PARTITION"
LINE 1: ...partitioning_test(id int, value_1 int, time date) PARTITION ...
^
-- create its partitions
CREATE TABLE partitioning_test_2017 PARTITION OF partitioning_test FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test_2017 PARTITION OF partitionin...
^
CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test_2010 PARTITION OF partitionin...
^
-- load some data and distribute tables
INSERT INTO partitioning_test VALUES (1, 1, '2017-11-23');
ERROR: relation "partitioning_test" does not exist
LINE 1: INSERT INTO partitioning_test VALUES (1, 1, '2017-11-23');
^
INSERT INTO partitioning_test VALUES (2, 1, '2010-07-07');
ERROR: relation "partitioning_test" does not exist
LINE 1: INSERT INTO partitioning_test VALUES (2, 1, '2010-07-07');
^
INSERT INTO partitioning_test_2017 VALUES (3, 3, '2017-11-22');
ERROR: relation "partitioning_test_2017" does not exist
LINE 1: INSERT INTO partitioning_test_2017 VALUES (3, 3, '2017-11-22...
^
INSERT INTO partitioning_test_2010 VALUES (4, 4, '2010-03-03');
ERROR: relation "partitioning_test_2010" does not exist
LINE 1: INSERT INTO partitioning_test_2010 VALUES (4, 4, '2010-03-03...
^
-- distribute partitioned table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('partitioning_test', 'id');
ERROR: relation "partitioning_test" does not exist
LINE 1: SELECT create_distributed_table('partitioning_test', 'id');
^
SET client_min_messages TO DEBUG1;
-- subplan for partitioned tables
SELECT
id
FROM
(SELECT
DISTINCT partitioning_test.id
FROM
partitioning_test
LIMIT 5
) as foo
ORDER BY 1 DESC;
ERROR: relation "partitioning_test" does not exist
LINE 7: partitioning_test
^
-- final query is router on partitioned tables
SELECT
*
FROM
(SELECT
DISTINCT partitioning_test.id
FROM
partitioning_test
LIMIT 5
) as foo,
(SELECT
DISTINCT partitioning_test.time
FROM
partitioning_test
LIMIT 5
) as bar
WHERE foo.id = date_part('day', bar.time)
ORDER BY 2 DESC, 1;
ERROR: relation "partitioning_test" does not exist
LINE 7: partitioning_test
^
-- final query is real-time
SELECT
*
FROM
(SELECT
DISTINCT partitioning_test.time
FROM
partitioning_test
ORDER BY 1 DESC
LIMIT 5
) as foo,
(
SELECT
DISTINCT partitioning_test.id
FROM
partitioning_test
) as bar
WHERE date_part('day', foo.time) = bar.id
ORDER BY 2 DESC, 1 DESC
LIMIT 3;
ERROR: relation "partitioning_test" does not exist
LINE 7: partitioning_test
^
-- final query is real-time that is joined with partitioned table
SELECT
*
FROM
(SELECT
DISTINCT partitioning_test.time
FROM
partitioning_test
ORDER BY 1 DESC
LIMIT 5
) as foo,
(
SELECT
DISTINCT partitioning_test.id
FROM
partitioning_test
) as bar,
partitioning_test
WHERE date_part('day', foo.time) = bar.id AND partitioning_test.id = bar.id
ORDER BY 2 DESC, 1 DESC
LIMIT 3;
ERROR: relation "partitioning_test" does not exist
LINE 7: partitioning_test
^
-- subquery in WHERE clause
SELECT DISTINCT id
FROM partitioning_test
WHERE
id IN (SELECT DISTINCT date_part('day', time) FROM partitioning_test);
ERROR: relation "partitioning_test" does not exist
LINE 2: FROM partitioning_test
^
-- repartition subquery
SET citus.enable_repartition_joins to ON;
SELECT
count(*)
FROM
(
SELECT DISTINCT p1.value_1 FROM partitioning_test as p1, partitioning_test as p2 WHERE p1.id = p2.value_1
) as foo,
(
SELECT user_id FROM users_table
) as bar
WHERE foo.value_1 = bar.user_id;
ERROR: relation "partitioning_test" does not exist
LINE 5: SELECT DISTINCT p1.value_1 FROM partitioning_test as p1, pa...
^
SET citus.enable_repartition_joins to OFF;
-- subquery, cte, view and non-partitioned tables
CREATE VIEW subquery_and_ctes AS
SELECT
*
FROM
(
WITH cte AS (
WITH local_cte AS (
SELECT * FROM users_table_local
),
dist_cte AS (
SELECT
user_id
FROM
events_table,
(SELECT DISTINCT value_1 FROM partitioning_test OFFSET 0) as foo
WHERE
events_table.user_id = foo.value_1 AND
events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3)
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
)
SELECT
count(*) as cnt
FROM
cte,
(SELECT
DISTINCT events_table.user_id
FROM
partitioning_test, events_table
WHERE
events_table.user_id = partitioning_test.id AND
event_type IN (1,2,3,4)
ORDER BY 1 DESC LIMIT 5
) as foo
WHERE foo.user_id = cte.user_id
) as foo, users_table WHERE foo.cnt > users_table.value_2;
ERROR: relation "partitioning_test" does not exist
LINE 15: (SELECT DISTINCT value_1 FROM partitioning_test OFFSET 0)...
^
SELECT * FROM subquery_and_ctes
ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC
LIMIT 5;
ERROR: relation "subquery_and_ctes" does not exist
LINE 1: SELECT * FROM subquery_and_ctes
^
-- deep subquery, partitioned and non-partitioned tables together
SELECT count(*)
FROM
(
SELECT avg(min) FROM
(
SELECT min(partitioning_test.value_1) FROM
(
SELECT avg(event_type) as avg_ev_type FROM
(
SELECT
max(value_1) as mx_val_1
FROM (
SELECT
avg(event_type) as avg
FROM
(
SELECT
cnt
FROM
(SELECT count(*) as cnt, value_1 FROM partitioning_test GROUP BY value_1) as level_1, users_table
WHERE
users_table.user_id = level_1.cnt
) as level_2, events_table
WHERE events_table.user_id = level_2.cnt
GROUP BY level_2.cnt
) as level_3, users_table
WHERE user_id = level_3.avg
GROUP BY level_3.avg
) as level_4, events_table
WHERE level_4.mx_val_1 = events_table.user_id
GROUP BY level_4.mx_val_1
) as level_5, partitioning_test
WHERE
level_5.avg_ev_type = partitioning_test.id
GROUP BY
level_5.avg_ev_type
) as level_6, users_table WHERE users_table.user_id = level_6.min
GROUP BY users_table.value_1
) as bar;
ERROR: relation "partitioning_test" does not exist
LINE 20: (SELECT count(*) as cnt, value_1 FROM partitioning_...
^
SET client_min_messages TO DEFAULT;
DROP SCHEMA subquery_and_partitioning CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table users_table_local
drop cascades to table events_table_local
SET search_path TO public;


@ -1,208 +0,0 @@
--
-- MULTI_REPARTITION_JOIN_PLANNING
--
-- Tests that cover repartition join planning. Note that we explicitly start a
-- transaction block here so that we don't emit debug messages with changing
-- transaction ids in them. Also, we set the executor type to the task tracker
-- executor here, as we cannot run repartition jobs with the real-time executor.
SET citus.next_shard_id TO 690000;
SET citus.enable_unique_job_ids TO off;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
version_above_nine
--------------------
f
(1 row)
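-- Aside (illustrative, not part of the expected output): \gset stores each column of
-- the preceding single-row result into a psql variable named after that column, which
-- is why :'server_version' can be interpolated as a quoted literal above; for example:
SHOW server_version \gset
SELECT :'server_version' AS captured_version;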
BEGIN;
SET client_min_messages TO DEBUG4;
DEBUG: CommitTransactionCommand
SET citus.task_executor_type TO 'task-tracker';
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
-- Debug4 log messages display jobIds within them. We explicitly set the jobId
-- sequence here so that the regression output becomes independent of the number
-- of jobs executed prior to running this test.
-- Multi-level repartition join to verify our projection columns are correctly
-- referenced and propagated across multiple repartition jobs. The test also
-- validates that only the minimal necessary projection columns are transferred
-- between jobs.
SELECT
l_partkey, o_orderkey, count(*)
FROM
lineitem, part_append, orders, customer_append
WHERE
l_orderkey = o_orderkey AND
l_partkey = p_partkey AND
c_custkey = o_custkey AND
(l_quantity > 5.0 OR l_extendedprice > 1200.0) AND
p_size > 8 AND o_totalprice > 10.0 AND
c_acctbal < 5000.0 AND l_partkey < 1000
GROUP BY
l_partkey, o_orderkey
ORDER BY
l_partkey, o_orderkey;
DEBUG: StartTransactionCommand
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290002 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290003 orders ON ((lineitem.l_orderkey OPERATOR(pg_catalog.=) orders.o_orderkey))) WHERE ((lineitem.l_partkey OPERATOR(pg_catalog.<) 1000) AND (orders.o_totalprice OPERATOR(pg_catalog.>) 10.0))"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000003".intermediate_column_1_0, "pg_merge_job_0001.task_000003".intermediate_column_1_1, "pg_merge_job_0001.task_000003".intermediate_column_1_2, "pg_merge_job_0001.task_000003".intermediate_column_1_3, "pg_merge_job_0001.task_000003".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000003 "pg_merge_job_0001.task_000003" JOIN part_append_290005 part_append ON (("pg_merge_job_0001.task_000003".intermediate_column_1_0 OPERATOR(pg_catalog.=) part_append.p_partkey))) WHERE (part_append.p_size OPERATOR(pg_catalog.>) 8)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000006".intermediate_column_1_0, "pg_merge_job_0001.task_000006".intermediate_column_1_1, "pg_merge_job_0001.task_000006".intermediate_column_1_2, "pg_merge_job_0001.task_000006".intermediate_column_1_3, "pg_merge_job_0001.task_000006".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000006 "pg_merge_job_0001.task_000006" JOIN part_append_280002 part_append ON (("pg_merge_job_0001.task_000006".intermediate_column_1_0 OPERATOR(pg_catalog.=) part_append.p_partkey))) WHERE (part_append.p_size OPERATOR(pg_catalog.>) 8)"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 6
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]
DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN customer_append_290004 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000005".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000008".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000008".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000008 "pg_merge_job_0002.task_000008" JOIN customer_append_280001 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000008".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000008".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000008".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000008".intermediate_column_2_0, "pg_merge_job_0002.task_000008".intermediate_column_2_1"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000011".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000011".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000011 "pg_merge_job_0002.task_000011" JOIN customer_append_280000 customer_append ON ((customer_append.c_custkey OPERATOR(pg_catalog.=) "pg_merge_job_0002.task_000011".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000011".intermediate_column_2_2 OPERATOR(pg_catalog.>) 5.0) OR ("pg_merge_job_0002.task_000011".intermediate_column_2_3 OPERATOR(pg_catalog.>) 1200.0)) AND (customer_append.c_acctbal OPERATOR(pg_catalog.<) 5000.0)) GROUP BY "pg_merge_job_0002.task_000011".intermediate_column_2_0, "pg_merge_job_0002.task_000011".intermediate_column_2_1"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 2
DEBUG: completed cleanup query for job 2
DEBUG: completed cleanup query for job 1
DEBUG: completed cleanup query for job 1
DEBUG: CommitTransactionCommand
l_partkey | o_orderkey | count
-----------+------------+-------
18 | 12005 | 1
79 | 5121 | 1
91 | 2883 | 1
222 | 9413 | 1
278 | 1287 | 1
309 | 2374 | 1
318 | 321 | 1
321 | 5984 | 1
337 | 10403 | 1
350 | 13698 | 1
358 | 4323 | 1
364 | 9347 | 1
416 | 640 | 1
426 | 10855 | 1
450 | 35 | 1
484 | 3843 | 1
504 | 14566 | 1
510 | 13569 | 1
532 | 3175 | 1
641 | 134 | 1
669 | 10944 | 1
716 | 2885 | 1
738 | 4355 | 1
802 | 2534 | 1
824 | 9287 | 1
864 | 3175 | 1
957 | 4293 | 1
960 | 10980 | 1
963 | 4580 | 1
(29 rows)
SELECT
l_partkey, o_orderkey, count(*)
FROM
lineitem, orders
WHERE
l_suppkey = o_shippriority AND
l_quantity < 5.0 AND o_totalprice <> 4.0
GROUP BY
l_partkey, o_orderkey
ORDER BY
l_partkey, o_orderkey;
DEBUG: StartTransactionCommand
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity OPERATOR(pg_catalog.<) 5.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290002 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290003 orders WHERE (o_totalprice OPERATOR(pg_catalog.<>) 4.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000003".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000003 "pg_merge_job_0004.task_000003" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000003".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000003".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000006".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000006 "pg_merge_job_0004.task_000006" JOIN pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000006".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000006".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000012".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000012 "pg_merge_job_0004.task_000012" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000012".intermediate_column_4_1 OPERATOR(pg_catalog.=) "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000012".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57638
DEBUG: completed cleanup query for job 6
DEBUG: completed cleanup query for job 6
DEBUG: completed cleanup query for job 4
DEBUG: completed cleanup query for job 4
DEBUG: completed cleanup query for job 5
DEBUG: completed cleanup query for job 5
DEBUG: CommitTransactionCommand
l_partkey | o_orderkey | count
-----------+------------+-------
(0 rows)
-- Reset client logging level to its previous value
SET client_min_messages TO NOTICE;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
COMMIT;


@ -1,141 +0,0 @@
--
-- MULTI_REPARTITION_JOIN_TASK_ASSIGNMENT
--
-- Tests which cover task assignment for MapMerge jobs for single range repartition
-- and dual hash repartition joins. The tests also cover task assignment propagation
-- from a SQL task to its dependent tasks. Note that we set the executor type to the
-- task tracker executor here, as we cannot run repartition jobs with the real-time executor.
SET citus.next_shard_id TO 710000;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
version_above_nine
--------------------
f
(1 row)
BEGIN;
SET client_min_messages TO DEBUG3;
DEBUG: CommitTransactionCommand
SET citus.task_executor_type TO 'task-tracker';
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
-- Single range repartition join to test anchor-shard based task assignment and
-- assignment propagation to merge and data-fetch tasks.
SELECT
count(*)
FROM
orders, customer_append
WHERE
o_custkey = c_custkey;
DEBUG: StartTransactionCommand
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]
DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 9
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: CommitTransactionCommand
count
-------
2985
(1 row)
-- Single range repartition join, along with a join with a small table containing
-- more than one shard. This situation results in multiple sql tasks depending on
-- the same merge task, and tests our constraint group creation and assignment
-- propagation.
SELECT
count(*)
FROM
orders_reference, customer_append, lineitem
WHERE
o_custkey = c_custkey AND
o_orderkey = l_orderkey;
DEBUG: StartTransactionCommand
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: join prunable for intervals [1,5986] and [8997,14947]
DEBUG: join prunable for intervals [8997,14947] and [1,5986]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 8
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: CommitTransactionCommand
count
-------
12000
(1 row)
-- Dual hash repartition join which tests the separate hash repartition join
-- task assignment algorithm.
SELECT
count(*)
FROM
lineitem, customer_append
WHERE
l_partkey = c_nationkey;
DEBUG: StartTransactionCommand
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: CommitTransactionCommand
count
-------
125
(1 row)
-- Reset client logging level to its previous value
SET client_min_messages TO NOTICE;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
COMMIT;


@ -1,289 +0,0 @@
--
-- MULTI_TASK_ASSIGNMENT
--
SET citus.next_shard_id TO 880000;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
version_above_nine
--------------------
f
(1 row)
SET citus.explain_distributed_queries TO off;
-- Check that our policies for assigning tasks to worker nodes run as expected.
-- To test this, we first create a shell table, and then manually insert shard
-- and shard placement data into system catalogs. We next run Explain command,
-- and check that tasks are assigned to worker nodes as expected.
CREATE TABLE task_assignment_test_table (test_id integer);
SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append');
create_distributed_table
--------------------------
(1 row)
-- Create logical shards with shardids 200, 201, and 202
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue)
SELECT pg_class.oid, series.index, 'r', 1, 1000
FROM pg_class, generate_series(200, 202) AS series(index)
WHERE pg_class.relname = 'task_assignment_test_table';
-- Create shard placements for shard 200 and 201
INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport)
SELECT 200, 1, 1, nodename, nodeport
FROM pg_dist_shard_placement
GROUP BY nodename, nodeport
ORDER BY nodename, nodeport ASC
LIMIT 2;
INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport)
SELECT 201, 1, 1, nodename, nodeport
FROM pg_dist_shard_placement
GROUP BY nodename, nodeport
ORDER BY nodename, nodeport ASC
LIMIT 2;
-- Create shard placements for shard 202
INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport)
SELECT 202, 1, 1, nodename, nodeport
FROM pg_dist_shard_placement
GROUP BY nodename, nodeport
ORDER BY nodename, nodeport DESC
LIMIT 2;
-- Start transaction block to avoid auto commits. This avoids additional debug
-- messages from getting printed at real transaction starts and commits.
BEGIN;
-- Increase log level to see which worker nodes tasks are assigned to. Note that
-- the following log messages print node name and port numbers; and node numbers
-- in regression tests depend upon PG_VERSION_NUM.
SET client_min_messages TO DEBUG3;
DEBUG: CommitTransactionCommand
-- First test the default greedy task assignment policy
SET citus.task_assignment_policy TO 'greedy';
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
-- Next test the first-replica task assignment policy
SET citus.task_assignment_policy TO 'first-replica';
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
-- Finally test the round-robin task assignment policy
SET citus.task_assignment_policy TO 'round-robin';
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
RESET citus.task_assignment_policy;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
RESET client_min_messages;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
COMMIT;
BEGIN;
SET LOCAL client_min_messages TO DEBUG3;
DEBUG: CommitTransactionCommand
SET LOCAL citus.explain_distributed_queries TO off;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
-- Check how task_assignment_policy impact planning decisions for reference tables
CREATE TABLE task_assignment_reference_table (test_id integer);
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
SELECT create_reference_table('task_assignment_reference_table');
DEBUG: StartTransactionCommand
DEBUG: CommitTransactionCommand
create_reference_table
------------------------
(1 row)
SET LOCAL citus.task_assignment_policy TO 'greedy';
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: CommitTransactionCommand
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
explain statements for distributed queries are not enabled
(2 rows)
EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: CommitTransactionCommand
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
explain statements for distributed queries are not enabled
(2 rows)
SET LOCAL citus.task_assignment_policy TO 'first-replica';
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: CommitTransactionCommand
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
explain statements for distributed queries are not enabled
(2 rows)
EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: CommitTransactionCommand
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
explain statements for distributed queries are not enabled
(2 rows)
-- here we expect debug output showing two different hosts for subsequent queries
SET LOCAL citus.task_assignment_policy TO 'round-robin';
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 0 to node localhost:57637
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: CommitTransactionCommand
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
explain statements for distributed queries are not enabled
(2 rows)
EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 0 to node localhost:57638
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: CommitTransactionCommand
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
explain statements for distributed queries are not enabled
(2 rows)
ROLLBACK;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand


@ -1,930 +0,0 @@
---
--- tests around access tracking within transaction blocks
---
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 10 AS version_ten_or_above;
version_ten_or_above
----------------------
f
(1 row)
CREATE SCHEMA access_tracking;
SET search_path TO 'access_tracking';
CREATE OR REPLACE FUNCTION relation_select_access_mode(relationId Oid)
RETURNS int
LANGUAGE C STABLE STRICT
AS 'citus', $$relation_select_access_mode$$;
CREATE OR REPLACE FUNCTION relation_dml_access_mode(relationId Oid)
RETURNS int
LANGUAGE C STABLE STRICT
AS 'citus', $$relation_dml_access_mode$$;
CREATE OR REPLACE FUNCTION relation_ddl_access_mode(relationId Oid)
RETURNS int
LANGUAGE C STABLE STRICT
AS 'citus', $$relation_ddl_access_mode$$;
CREATE OR REPLACE FUNCTION relation_access_mode_to_text(relationShardAccess int)
RETURNS text AS
$$
BEGIN
IF relationShardAccess = 0 THEN
RETURN 'not_accessed';
ELSIF relationShardAccess = 1 THEN
RETURN 'sequential_access';
ELSE
RETURN 'parallel_access';
END IF;
END;
$$ LANGUAGE 'plpgsql' IMMUTABLE;
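-- Illustrative only, not part of the original test: the helper above maps the integer
-- access modes reported by the C functions to readable labels, for example
SELECT relation_access_mode_to_text(0);  -- 'not_accessed'
SELECT relation_access_mode_to_text(2);  -- 'parallel_access'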
CREATE VIEW relation_acesses AS
SELECT table_name,
relation_access_mode_to_text(relation_select_access_mode(table_name::regclass)) as select_access,
relation_access_mode_to_text(relation_dml_access_mode(table_name::regclass)) as dml_access,
relation_access_mode_to_text(relation_ddl_access_mode(table_name::regclass)) as ddl_access
FROM
((SELECT 'table_' || i as table_name FROM generate_series(1, 7) i) UNION (SELECT 'partitioning_test') UNION (SELECT 'partitioning_test_2009') UNION (SELECT 'partitioning_test_2010')) tables;
SET citus.shard_replication_factor TO 1;
CREATE TABLE table_1 (key int, value int);
SELECT create_distributed_table('table_1', 'key');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE table_2 (key int, value int);
SELECT create_distributed_table('table_2', 'key');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE table_3 (key int, value int);
SELECT create_distributed_table('table_3', 'key');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE table_4 (key int, value int);
SELECT create_distributed_table('table_4', 'key');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE table_5 (key int, value int);
SELECT create_distributed_table('table_5', 'key');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE table_6 (key int, value int);
SELECT create_reference_Table('table_6');
create_reference_table
------------------------
(1 row)
INSERT INTO table_1 SELECT i, i FROM generate_series(0,100) i;
INSERT INTO table_2 SELECT i, i FROM generate_series(0,100) i;
INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i;
INSERT INTO table_4 SELECT i, i FROM generate_series(0,100) i;
INSERT INTO table_5 SELECT i, i FROM generate_series(0,100) i;
INSERT INTO table_6 SELECT i, i FROM generate_series(0,100) i;
-- create_distributed_table works fine
BEGIN;
CREATE TABLE table_7 (key int, value int);
SELECT create_distributed_table('table_7', 'key');
create_distributed_table
--------------------------
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_7') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+-----------------
table_7 | not_accessed | not_accessed | parallel_access
(1 row)
COMMIT;
-- outside the transaction blocks, the function always returns zero
SELECT count(*) FROM table_1;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+--------------
table_1 | not_accessed | not_accessed | not_accessed
(1 row)
-- a very simple test that first checks sequential
-- and parallel SELECTs, DMLs, and DDLs
BEGIN;
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+--------------
table_1 | not_accessed | not_accessed | not_accessed
(1 row)
SELECT count(*) FROM table_1 WHERE key = 1;
count
-------
1
(1 row)
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-------------------+--------------+--------------
table_1 | sequential_access | not_accessed | not_accessed
(1 row)
SELECT count(*) FROM table_1 WHERE key = 1 OR key = 2;
count
-------
2
(1 row)
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-----------------+--------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
(1 row)
INSERT INTO table_1 VALUES (1,1);
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-----------------+-------------------+--------------
table_1 | parallel_access | sequential_access | not_accessed
(1 row)
INSERT INTO table_1 VALUES (1,1), (2,2);
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-----------------+-------------------+--------------
table_1 | parallel_access | sequential_access | not_accessed
(1 row)
ALTER TABLE table_1 ADD COLUMN test_col INT;
-- now see that the other tables are not accessed at all
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-----------------+-------------------+-----------------
table_1 | parallel_access | sequential_access | parallel_access
(1 row)
ROLLBACK;
-- this test shows that even if multiple single-shard
-- commands are executed, we can treat the transaction as sequential
BEGIN;
SELECT count(*) FROM table_1 WHERE key = 1;
count
-------
1
(1 row)
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-------------------+--------------+--------------
table_1 | sequential_access | not_accessed | not_accessed
(1 row)
SELECT count(*) FROM table_1 WHERE key = 2;
count
-------
1
(1 row)
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-------------------+--------------+--------------
table_1 | sequential_access | not_accessed | not_accessed
(1 row)
INSERT INTO table_1 VALUES (1,1);
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-------------------+-------------------+--------------
table_1 | sequential_access | sequential_access | not_accessed
(1 row)
INSERT INTO table_1 VALUES (2,2);
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-------------------+-------------------+--------------
table_1 | sequential_access | sequential_access | not_accessed
(1 row)
ROLLBACK;
-- a sample DDL example
BEGIN;
ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key);
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+-----------------
table_1 | not_accessed | not_accessed | parallel_access
(1 row)
ROLLBACK;
-- a simple join touches a single shard per table
BEGIN;
SELECT
count(*)
FROM
table_1, table_2, table_3, table_4, table_5
WHERE
table_1.key = table_2.key AND table_2.key = table_3.key AND
table_3.key = table_4.key AND table_4.key = table_5.key AND
table_1.key = 1;
count
-------
1
(1 row)
SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-------------------+--------------+--------------
table_1 | sequential_access | not_accessed | not_accessed
table_2 | sequential_access | not_accessed | not_accessed
table_3 | sequential_access | not_accessed | not_accessed
table_4 | sequential_access | not_accessed | not_accessed
table_5 | sequential_access | not_accessed | not_accessed
table_6 | not_accessed | not_accessed | not_accessed
table_7 | not_accessed | not_accessed | not_accessed
(7 rows)
ROLLBACK;
-- a simple real-time join touches all shards per table
BEGIN;
SELECT
count(*)
FROM
table_1, table_2
WHERE
table_1.key = table_2.key;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+--------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
table_2 | parallel_access | not_accessed | not_accessed
(2 rows)
ROLLBACK;
-- a simple real-time join touches all shards per table
-- in sequential mode
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT
count(*)
FROM
table_1, table_2
WHERE
table_1.key = table_2.key;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-------------------+--------------+--------------
table_1 | sequential_access | not_accessed | not_accessed
table_2 | sequential_access | not_accessed | not_accessed
(2 rows)
ROLLBACK;
-- a simple subquery pushdown that touches all shards
BEGIN;
SELECT
count(*)
FROM
(
SELECT
random()
FROM
table_1, table_2, table_3, table_4, table_5
WHERE
table_1.key = table_2.key AND table_2.key = table_3.key AND
table_3.key = table_4.key AND table_4.key = table_5.key
) as foo;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name LIKE 'table_%' ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+--------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
table_2 | parallel_access | not_accessed | not_accessed
table_3 | parallel_access | not_accessed | not_accessed
table_4 | parallel_access | not_accessed | not_accessed
table_5 | parallel_access | not_accessed | not_accessed
table_6 | not_accessed | not_accessed | not_accessed
table_7 | not_accessed | not_accessed | not_accessed
(7 rows)
ROLLBACK;
-- simple multi-shard update in both sequential and parallel modes
-- note that in multi-shard modify mode we always add select
-- access for all the shards accessed, but sequential mode is OK
BEGIN;
UPDATE table_1 SET value = 15;
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_name | select_access | dml_access | ddl_access
------------+-----------------+-----------------+--------------
table_1 | parallel_access | parallel_access | not_accessed
(1 row)
SET LOCAL citus.multi_shard_modify_mode = 'sequential';
UPDATE table_2 SET value = 15;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-------------------+-------------------+--------------
table_1 | parallel_access | parallel_access | not_accessed
table_2 | sequential_access | sequential_access | not_accessed
(2 rows)
ROLLBACK;
-- now UPDATE/DELETE with subselect pushdown
BEGIN;
UPDATE
table_1 SET value = 15
WHERE key IN (SELECT key FROM table_2 JOIN table_3 USING (key) WHERE table_2.value = 15);
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+-----------------+--------------
table_1 | parallel_access | parallel_access | not_accessed
table_2 | parallel_access | not_accessed | not_accessed
table_3 | parallel_access | not_accessed | not_accessed
(3 rows)
ROLLBACK;
-- INSERT .. SELECT pushdown
BEGIN;
INSERT INTO table_2 SELECT * FROM table_1;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+-----------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
table_2 | not_accessed | parallel_access | not_accessed
(2 rows)
ROLLBACK;
-- INSERT .. SELECT pushdown in sequential mode should be OK
BEGIN;
SET LOCAL citus.multi_shard_modify_mode = 'sequential';
INSERT INTO table_2 SELECT * FROM table_1;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-------------------+-------------------+--------------
table_1 | sequential_access | not_accessed | not_accessed
table_2 | not_accessed | sequential_access | not_accessed
(2 rows)
ROLLBACK;
-- coordinator INSERT .. SELECT
BEGIN;
INSERT INTO table_2 SELECT * FROM table_1 OFFSET 0;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+-----------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
table_2 | not_accessed | parallel_access | not_accessed
(2 rows)
ROLLBACK;
-- recursively planned SELECT
BEGIN;
SELECT
count(*)
FROM
(
SELECT
random()
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
OFFSET 0
) as foo;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+--------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
table_2 | parallel_access | not_accessed | not_accessed
(2 rows)
ROLLBACK;
-- recursively planned SELECT and coordinator INSERT .. SELECT
BEGIN;
INSERT INTO table_3 (key)
SELECT
*
FROM
(
SELECT
random() * 1000
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
OFFSET 0
) as foo;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+-----------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
table_2 | parallel_access | not_accessed | not_accessed
table_3 | not_accessed | parallel_access | not_accessed
(3 rows)
ROLLBACK;
-- recursively planned SELECT and coordinator INSERT .. SELECT
-- but modifies single shard, marked as sequential operation
BEGIN;
INSERT INTO table_3 (key)
SELECT
*
FROM
(
SELECT
random() * 1000
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
AND table_1.key = 1
OFFSET 0
) as foo;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-------------------+-------------------+--------------
table_1 | sequential_access | not_accessed | not_accessed
table_2 | sequential_access | not_accessed | not_accessed
table_3 | not_accessed | sequential_access | not_accessed
(3 rows)
ROLLBACK;
-- recursively planned SELECT and recursively planned multi-shard DELETE
BEGIN;
DELETE FROM table_3 where key IN
(
SELECT
*
FROM
(
SELECT
table_1.key
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
OFFSET 0
) as foo
) AND value IN (SELECT key FROM table_4);
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3', 'table_4') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+-----------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
table_2 | parallel_access | not_accessed | not_accessed
table_3 | parallel_access | parallel_access | not_accessed
table_4 | parallel_access | not_accessed | not_accessed
(4 rows)
ROLLBACK;
-- copy out
BEGIN;
COPY (SELECT * FROM table_1 WHERE key IN (1,2,3) ORDER BY 1) TO stdout;
1 1
2 2
3 3
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+--------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
(1 row)
ROLLBACK;
-- copy in
BEGIN;
COPY table_1 FROM STDIN WITH CSV;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+-----------------+--------------
table_1 | not_accessed | parallel_access | not_accessed
(1 row)
ROLLBACK;
-- copy in single shard
BEGIN;
COPY table_1 FROM STDIN WITH CSV;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+-------------------+--------------
table_1 | not_accessed | sequential_access | not_accessed
(1 row)
ROLLBACK;
-- reference table accesses should always be sequential
BEGIN;
SELECT count(*) FROM table_6;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_6');
table_name | select_access | dml_access | ddl_access
------------+-------------------+--------------+--------------
table_6 | sequential_access | not_accessed | not_accessed
(1 row)
UPDATE table_6 SET value = 15;
SELECT * FROM relation_acesses WHERE table_name IN ('table_6');
table_name | select_access | dml_access | ddl_access
------------+-------------------+-------------------+--------------
table_6 | sequential_access | sequential_access | not_accessed
(1 row)
ALTER TABLE table_6 ADD COLUMN x INT;
SELECT * FROM relation_acesses WHERE table_name IN ('table_6');
table_name | select_access | dml_access | ddl_access
------------+-------------------+-------------------+-------------------
table_6 | sequential_access | sequential_access | sequential_access
(1 row)
ROLLBACK;
-- reference table join with a distributed table
BEGIN;
SELECT count(*) FROM table_1 JOIN table_6 USING(key);
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_6', 'table_1') ORDER BY 1,2;
table_name | select_access | dml_access | ddl_access
------------+-----------------+--------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
table_6 | parallel_access | not_accessed | not_accessed
(2 rows)
ROLLBACK;
-- TRUNCATE should be DDL
BEGIN;
TRUNCATE table_1;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+-----------------
table_1 | not_accessed | not_accessed | parallel_access
(1 row)
ROLLBACK;
-- TRUNCATE can be a sequential DDL
BEGIN;
SET LOCAL citus.multi_shard_modify_mode = 'sequential';
TRUNCATE table_1;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+-------------------
table_1 | not_accessed | not_accessed | sequential_access
(1 row)
ROLLBACK;
-- TRUNCATE on a reference table should be sequential
BEGIN;
TRUNCATE table_6;
SELECT * FROM relation_acesses WHERE table_name IN ('table_6') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+-------------------
table_6 | not_accessed | not_accessed | sequential_access
(1 row)
ROLLBACK;
-- creating foreign keys should consider adding the placement accesses for the referenced table
ALTER TABLE table_1 ADD CONSTRAINT table_1_u UNIQUE (key);
BEGIN;
ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key);
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+-----------------
table_1 | not_accessed | not_accessed | parallel_access
table_2 | not_accessed | not_accessed | parallel_access
(2 rows)
ROLLBACK;
-- creating foreign keys should consider adding the placement accesses for the referenced table
-- in sequential mode as well
BEGIN;
SET LOCAL citus.multi_shard_modify_mode = 'sequential';
ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key);
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+-------------------
table_1 | not_accessed | not_accessed | sequential_access
table_2 | not_accessed | not_accessed | sequential_access
(2 rows)
ROLLBACK;
CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time);
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test(id int, time date) PARTITION ...
^
SELECT create_distributed_table('partitioning_test', 'id');
ERROR: relation "partitioning_test" does not exist
LINE 1: SELECT create_distributed_table('partitioning_test', 'id');
^
-- Adding partition tables via CREATE TABLE should add DDL access to the partitioned table as well
BEGIN;
CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test_2009 PARTITION OF partitionin...
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
-- Adding partition tables via ATTACH PARTITION on local tables should add DDL access to the partitioned table as well
CREATE TABLE partitioning_test_2009 AS SELECT * FROM partitioning_test;
ERROR: relation "partitioning_test" does not exist
LINE 1: ...ATE TABLE partitioning_test_2009 AS SELECT * FROM partitioni...
^
BEGIN;
ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_...
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- Adding partition tables via ATTACH PARTITION on distributed tables should add DDL access to the partitioned table as well
CREATE TABLE partitioning_test_2010 AS SELECT * FROM partitioning_test;
ERROR: relation "partitioning_test" does not exist
LINE 1: ...ATE TABLE partitioning_test_2010 AS SELECT * FROM partitioni...
^
SELECT create_distributed_table('partitioning_test_2010', 'id');
ERROR: relation "partitioning_test_2010" does not exist
LINE 1: SELECT create_distributed_table('partitioning_test_2010', 'i...
^
BEGIN;
ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2010 FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_...
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- reading from partitioned table marks all of its partitions
BEGIN;
SELECT count(*) FROM partitioning_test;
ERROR: relation "partitioning_test" does not exist
LINE 1: SELECT count(*) FROM partitioning_test;
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- reading from partitioned table sequentially marks all of its partitions with sequential accesses
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT count(*) FROM partitioning_test;
ERROR: relation "partitioning_test" does not exist
LINE 1: SELECT count(*) FROM partitioning_test;
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- updating partitioned table marks all of its partitions
BEGIN;
UPDATE partitioning_test SET time = now();
ERROR: relation "partitioning_test" does not exist
LINE 1: UPDATE partitioning_test SET time = now();
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- updating partitioned table sequentially marks all of its partitions with sequential accesses
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
UPDATE partitioning_test SET time = now();
ERROR: relation "partitioning_test" does not exist
LINE 1: UPDATE partitioning_test SET time = now();
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- DDLs on partitioned table mark all of its partitions
BEGIN;
ALTER TABLE partitioning_test ADD COLUMN X INT;
ERROR: relation "partitioning_test" does not exist
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
-- DDLs on partitioned table sequentially mark all of its partitions with sequential accesses
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
ALTER TABLE partitioning_test ADD COLUMN X INT;
ERROR: relation "partitioning_test" does not exist
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
-- reading from partition table marks its parent
BEGIN;
SELECT count(*) FROM partitioning_test_2009;
ERROR: relation "partitioning_test_2009" does not exist
LINE 1: SELECT count(*) FROM partitioning_test_2009;
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- reading from partition table marks its parent with sequential accesses
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT count(*) FROM partitioning_test_2009;
ERROR: relation "partitioning_test_2009" does not exist
LINE 1: SELECT count(*) FROM partitioning_test_2009;
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- updating from partition table marks its parent
BEGIN;
UPDATE partitioning_test_2009 SET time = now();
ERROR: relation "partitioning_test_2009" does not exist
LINE 1: UPDATE partitioning_test_2009 SET time = now();
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- updating from partition table marks its parent with sequential accesses
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
UPDATE partitioning_test_2009 SET time = now();
ERROR: relation "partitioning_test_2009" does not exist
LINE 1: UPDATE partitioning_test_2009 SET time = now();
^
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
-- DDLs on partition table mark its parent
BEGIN;
CREATE INDEX i1000000 ON partitioning_test_2009 (id);
ERROR: relation "partitioning_test_2009" does not exist
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
-- DDLs on partition table mark its parent in sequential mode
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE INDEX i1000000 ON partitioning_test_2009 (id);
ERROR: relation "partitioning_test_2009" does not exist
SELECT * FROM relation_acesses WHERE table_name IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
-- TRUNCATE CASCADE works fine
ALTER TABLE table_2 ADD CONSTRAINT table_2_u FOREIGN KEY (key) REFERENCES table_1(key);
BEGIN;
TRUNCATE table_1 CASCADE;
NOTICE: truncate cascades to table "table_2"
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+--------------+-----------------
table_1 | not_accessed | not_accessed | parallel_access
table_2 | not_accessed | not_accessed | parallel_access
(2 rows)
ROLLBACK;
-- CTEs with SELECT only should work fine
BEGIN;
WITH cte AS (SELECT count(*) FROM table_1)
SELECT * FROM cte;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+--------------+--------------
table_1 | parallel_access | not_accessed | not_accessed
(1 row)
COMMIT;
-- CTEs with SELECT only in sequential mode should work fine
BEGIN;
SET LOCAL citus.multi_shard_modify_mode = 'sequential';
WITH cte AS (SELECT count(*) FROM table_1)
SELECT * FROM cte;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-------------------+--------------+--------------
table_1 | sequential_access | not_accessed | not_accessed
(1 row)
COMMIT;
-- modifying CTEs should work fine with multi-row inserts, which run in sequential mode by default
BEGIN;
WITH cte_1 AS (INSERT INTO table_1 VALUES (1000,1000), (1001, 1001), (1002, 1002) RETURNING *)
SELECT * FROM cte_1 ORDER BY 1;
key | value
------+-------
1000 | 1000
1001 | 1001
1002 | 1002
(3 rows)
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+-------------------+--------------
table_1 | not_accessed | sequential_access | not_accessed
(1 row)
ROLLBACK;
-- modifying CTEs should work fine with parallel mode
BEGIN;
WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *)
SELECT count(*) FROM cte_1 ORDER BY 1;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+-----------------+--------------
table_1 | parallel_access | parallel_access | not_accessed
(1 row)
ROLLBACK;
-- modifying CTEs should work fine with sequential mode
BEGIN;
WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *)
SELECT count(*) FROM cte_1 ORDER BY 1;
count
-------
101
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+-----------------+-----------------+--------------
table_1 | parallel_access | parallel_access | not_accessed
(1 row)
ROLLBACK;
-- create distributed table with data loading
-- should mark both parallel dml and parallel ddl
DROP TABLE table_3;
CREATE TABLE table_3 (key int, value int);
INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i;
BEGIN;
SELECT create_distributed_table('table_3', 'key');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
SELECT * FROM relation_acesses WHERE table_name IN ('table_3') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
------------+---------------+-----------------+-----------------
table_3 | not_accessed | parallel_access | parallel_access
(1 row)
COMMIT;
SET search_path TO 'public';
DROP SCHEMA access_tracking CASCADE;
NOTICE: drop cascades to 12 other objects
DETAIL: drop cascades to function access_tracking.relation_select_access_mode(oid)
drop cascades to function access_tracking.relation_dml_access_mode(oid)
drop cascades to function access_tracking.relation_ddl_access_mode(oid)
drop cascades to function access_tracking.relation_access_mode_to_text(integer)
drop cascades to view access_tracking.relation_acesses
drop cascades to table access_tracking.table_1
drop cascades to table access_tracking.table_2
drop cascades to table access_tracking.table_4
drop cascades to table access_tracking.table_5
drop cascades to table access_tracking.table_6
drop cascades to table access_tracking.table_7
drop cascades to table access_tracking.table_3
View File
@ -1,358 +0,0 @@
--
-- Distributed Partitioned Table Tests
--
SET citus.next_shard_id TO 1760000;
CREATE SCHEMA partitioned_table_replicated;
SET search_path TO partitioned_table_replicated;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
-- print major version number for version-specific tests
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int AS server_version;
server_version
----------------
9
(1 row)
CREATE TABLE collections (
key bigint,
ts timestamptz,
collection_id integer,
value numeric
) PARTITION BY LIST ( collection_id );
ERROR: syntax error at or near "PARTITION"
LINE 6: ) PARTITION BY LIST ( collection_id );
^
CREATE TABLE collections_1
PARTITION OF collections (key, ts, collection_id, value)
FOR VALUES IN ( 1 );
ERROR: syntax error at or near "PARTITION"
LINE 2: PARTITION OF collections (key, ts, collection_id, value)
^
CREATE TABLE collections_2
PARTITION OF collections (key, ts, collection_id, value)
FOR VALUES IN ( 2 );
ERROR: syntax error at or near "PARTITION"
LINE 2: PARTITION OF collections (key, ts, collection_id, value)
^
-- load some data
INSERT INTO collections (key, ts, collection_id, value) VALUES (1, '2009-01-01', 1, 1);
ERROR: relation "collections" does not exist
LINE 1: INSERT INTO collections (key, ts, collection_id, value) VALU...
^
INSERT INTO collections (key, ts, collection_id, value) VALUES (2, '2009-01-01', 1, 2);
ERROR: relation "collections" does not exist
LINE 1: INSERT INTO collections (key, ts, collection_id, value) VALU...
^
INSERT INTO collections (key, ts, collection_id, value) VALUES (3, '2009-01-01', 2, 1);
ERROR: relation "collections" does not exist
LINE 1: INSERT INTO collections (key, ts, collection_id, value) VALU...
^
INSERT INTO collections (key, ts, collection_id, value) VALUES (4, '2009-01-01', 2, 2);
ERROR: relation "collections" does not exist
LINE 1: INSERT INTO collections (key, ts, collection_id, value) VALU...
^
-- in the first case, we'll distribute the
-- already existing partitioning hierarchy
SELECT create_distributed_table('collections', 'key');
ERROR: relation "collections" does not exist
LINE 1: SELECT create_distributed_table('collections', 'key');
^
-- now create a partition of an already distributed table
CREATE TABLE collections_3 PARTITION OF collections FOR VALUES IN ( 3 );
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE collections_3 PARTITION OF collections FOR VALU...
^
-- now attaching a non-distributed table to a distributed table
CREATE TABLE collections_4 AS SELECT * FROM collections LIMIT 0;
ERROR: relation "collections" does not exist
LINE 1: CREATE TABLE collections_4 AS SELECT * FROM collections LIMI...
^
-- load some data
INSERT INTO collections_4 SELECT i, '2009-01-01', 4, i FROM generate_series (0, 10) i;
ERROR: relation "collections_4" does not exist
LINE 1: INSERT INTO collections_4 SELECT i, '2009-01-01', 4, i FROM ...
^
ALTER TABLE collections ATTACH PARTITION collections_4 FOR VALUES IN ( 4 );
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE collections ATTACH PARTITION collections_4 FOR V...
^
-- finally attach a distributed table to a distributed table
CREATE TABLE collections_5 AS SELECT * FROM collections LIMIT 0;
ERROR: relation "collections" does not exist
LINE 1: CREATE TABLE collections_5 AS SELECT * FROM collections LIMI...
^
SELECT create_distributed_table('collections_5', 'key');
ERROR: relation "collections_5" does not exist
LINE 1: SELECT create_distributed_table('collections_5', 'key');
^
-- load some data
INSERT INTO collections_5 SELECT i, '2009-01-01', 5, i FROM generate_series (0, 10) i;
ERROR: relation "collections_5" does not exist
LINE 1: INSERT INTO collections_5 SELECT i, '2009-01-01', 5, i FROM ...
^
ALTER TABLE collections ATTACH PARTITION collections_5 FOR VALUES IN ( 5 );
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE collections ATTACH PARTITION collections_5 FOR V...
^
-- make sure that we have all the placements
SELECT
logicalrelid, count(*) as placement_count
FROM
pg_dist_shard, pg_dist_shard_placement
WHERE
logicalrelid::text LIKE '%collections%' AND
pg_dist_shard.shardid = pg_dist_shard_placement.shardid
GROUP BY
logicalrelid
ORDER BY
1,2;
logicalrelid | placement_count
--------------+-----------------
(0 rows)
-- and, make sure that all tables are colocated
SELECT
count(DISTINCT colocationid)
FROM
pg_dist_partition
WHERE
logicalrelid::text LIKE '%collections%';
count
-------
0
(1 row)
-- make sure that any kind of modification is disallowed on partitions
-- given that replication factor > 1
INSERT INTO collections_4 (key, ts, collection_id, value) VALUES (4, '2009-01-01', 2, 2);
ERROR: relation "collections_4" does not exist
LINE 1: INSERT INTO collections_4 (key, ts, collection_id, value) VA...
^
-- single shard update/delete not allowed
UPDATE collections_1 SET ts = now() WHERE key = 1;
ERROR: relation "collections_1" does not exist
LINE 1: UPDATE collections_1 SET ts = now() WHERE key = 1;
^
DELETE FROM collections_1 WHERE ts = now() AND key = 1;
ERROR: relation "collections_1" does not exist
LINE 1: DELETE FROM collections_1 WHERE ts = now() AND key = 1;
^
-- multi shard update/delete are not allowed
UPDATE collections_1 SET ts = now();
ERROR: relation "collections_1" does not exist
LINE 1: UPDATE collections_1 SET ts = now();
^
DELETE FROM collections_1 WHERE ts = now();
ERROR: relation "collections_1" does not exist
LINE 1: DELETE FROM collections_1 WHERE ts = now();
^
-- insert..select pushdown
INSERT INTO collections_1 SELECT * FROM collections_1;
ERROR: relation "collections_1" does not exist
LINE 1: INSERT INTO collections_1 SELECT * FROM collections_1;
^
-- insert..select via coordinator
INSERT INTO collections_1 SELECT * FROM collections_1 OFFSET 0;
ERROR: relation "collections_1" does not exist
LINE 1: INSERT INTO collections_1 SELECT * FROM collections_1 OFFSET...
^
-- COPY is not allowed
COPY collections_1 FROM STDIN;
ERROR: relation "collections_1" does not exist
\.
invalid command \.
-- DDLs are not allowed
CREATE INDEX index_on_partition ON collections_1(key);
ERROR: relation "collections_1" does not exist
-- EXPLAIN with modifications is not allowed either
UPDATE collections_1 SET ts = now() WHERE key = 1;
ERROR: relation "collections_1" does not exist
LINE 1: UPDATE collections_1 SET ts = now() WHERE key = 1;
^
-- TRUNCATE is also not allowed
TRUNCATE collections_1;
ERROR: relation "collections_1" does not exist
TRUNCATE collections, collections_1;
ERROR: relation "collections" does not exist
-- modifying CTEs are also not allowed
WITH collections_5_cte AS
(
DELETE FROM collections_5 RETURNING *
)
SELECT * FROM collections_5_cte;
ERROR: relation "collections_5" does not exist
LINE 3: DELETE FROM collections_5 RETURNING *
^
-- foreign key creation is disallowed due to replication factor > 1
CREATE TABLE fkey_test (key bigint PRIMARY KEY);
SELECT create_distributed_table('fkey_test', 'key');
create_distributed_table
--------------------------
(1 row)
ALTER TABLE
collections_5
ADD CONSTRAINT
fkey_delete FOREIGN KEY(key)
REFERENCES
fkey_test(key) ON DELETE CASCADE;
ERROR: relation "collections_5" does not exist
-- we should be able to attach and detach partitions
-- given that those DDLs are on the parent table
CREATE TABLE collections_6
PARTITION OF collections (key, ts, collection_id, value)
FOR VALUES IN ( 6 );
ERROR: syntax error at or near "PARTITION"
LINE 2: PARTITION OF collections (key, ts, collection_id, value)
^
ALTER TABLE collections DETACH PARTITION collections_6;
ERROR: syntax error at or near "DETACH"
LINE 1: ALTER TABLE collections DETACH PARTITION collections_6;
^
ALTER TABLE collections ATTACH PARTITION collections_6 FOR VALUES IN ( 6 );
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE collections ATTACH PARTITION collections_6 FOR V...
^
-- read queries work just fine
SELECT count(*) FROM collections_1 WHERE key = 1;
ERROR: relation "collections_1" does not exist
LINE 1: SELECT count(*) FROM collections_1 WHERE key = 1;
^
SELECT count(*) FROM collections_1 WHERE key != 1;
ERROR: relation "collections_1" does not exist
LINE 1: SELECT count(*) FROM collections_1 WHERE key != 1;
^
-- rollups SELECT'ing from partitions should work just fine
CREATE TABLE collections_agg (
key bigint,
sum_value numeric
);
SELECT create_distributed_table('collections_agg', 'key');
create_distributed_table
--------------------------
(1 row)
-- pushdown roll-up
INSERT INTO collections_agg SELECT key, sum(key) FROM collections_1 GROUP BY key;
ERROR: relation "collections_1" does not exist
LINE 1: ...RT INTO collections_agg SELECT key, sum(key) FROM collection...
^
-- coordinator roll-up
INSERT INTO collections_agg SELECT collection_id, sum(key) FROM collections_1 GROUP BY collection_id;
ERROR: relation "collections_1" does not exist
LINE 1: ...llections_agg SELECT collection_id, sum(key) FROM collection...
^
-- now make sure that repair functionality works fine
-- create a table and create its distribution metadata
CREATE TABLE customer_engagements (id integer, event_id int) PARTITION BY LIST ( event_id );
ERROR: syntax error at or near "PARTITION"
LINE 1: ...E customer_engagements (id integer, event_id int) PARTITION ...
^
CREATE TABLE customer_engagements_1
PARTITION OF customer_engagements
FOR VALUES IN ( 1 );
ERROR: syntax error at or near "PARTITION"
LINE 2: PARTITION OF customer_engagements
^
CREATE TABLE customer_engagements_2
PARTITION OF customer_engagements
FOR VALUES IN ( 2 );
ERROR: syntax error at or near "PARTITION"
LINE 2: PARTITION OF customer_engagements
^
-- add some indexes
CREATE INDEX ON customer_engagements (id);
ERROR: relation "customer_engagements" does not exist
CREATE INDEX ON customer_engagements (event_id);
ERROR: relation "customer_engagements" does not exist
CREATE INDEX ON customer_engagements (id, event_id);
ERROR: relation "customer_engagements" does not exist
-- distribute the table
-- create a single shard on the first worker
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('customer_engagements', 'id', 'hash');
ERROR: relation "customer_engagements" does not exist
LINE 1: SELECT create_distributed_table('customer_engagements', 'id'...
^
-- ingest some data for the tests
INSERT INTO customer_engagements VALUES (1, 1);
ERROR: relation "customer_engagements" does not exist
LINE 1: INSERT INTO customer_engagements VALUES (1, 1);
^
INSERT INTO customer_engagements VALUES (2, 1);
ERROR: relation "customer_engagements" does not exist
LINE 1: INSERT INTO customer_engagements VALUES (2, 1);
^
INSERT INTO customer_engagements VALUES (1, 2);
ERROR: relation "customer_engagements" does not exist
LINE 1: INSERT INTO customer_engagements VALUES (1, 2);
^
INSERT INTO customer_engagements VALUES (2, 2);
ERROR: relation "customer_engagements" does not exist
LINE 1: INSERT INTO customer_engagements VALUES (2, 2);
^
-- the following queries do the following:
-- (i) create a new shard
-- (ii) mark the second shard placements as unhealthy
-- (iii) do basic checks, i.e., only allow copy from the healthy placement to unhealthy ones
-- (iv) do a successful master_copy_shard_placement from the first placement to the second
-- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
-- get the newshardid
SELECT shardid as newshardid FROM pg_dist_shard WHERE logicalrelid = 'customer_engagements'::regclass
\gset
ERROR: relation "customer_engagements" does not exist
LINE 1: ...ewshardid FROM pg_dist_shard WHERE logicalrelid = 'customer_...
^
-- now, update the second placement as unhealthy
UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid
AND groupid = :worker_2_group;
ERROR: syntax error at or near ":"
LINE 1: ...dist_placement SET shardstate = 3 WHERE shardid = :newshardi...
^
-- cannot repair a shard after a modification (transaction still open during repair)
BEGIN;
INSERT INTO customer_engagements VALUES (1, 1);
ERROR: relation "customer_engagements" does not exist
LINE 1: INSERT INTO customer_engagements VALUES (1, 1);
^
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
ERROR: syntax error at or near ":"
LINE 1: SELECT master_copy_shard_placement(:newshardid, 'localhost',...
^
ROLLBACK;
-- modifications after repairing a shard are fine (will use new metadata)
BEGIN;
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
ERROR: syntax error at or near ":"
LINE 1: SELECT master_copy_shard_placement(:newshardid, 'localhost',...
^
ALTER TABLE customer_engagements ADD COLUMN value float DEFAULT 1.0;
ERROR: current transaction is aborted, commands ignored until end of transaction block
SELECT * FROM customer_engagements ORDER BY 1,2,3;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
BEGIN;
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
ERROR: syntax error at or near ":"
LINE 1: SELECT master_copy_shard_placement(:newshardid, 'localhost',...
^
INSERT INTO customer_engagements VALUES (1, 1);
ERROR: current transaction is aborted, commands ignored until end of transaction block
SELECT count(*) FROM customer_engagements;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
-- TRUNCATE is allowed on the parent table
-- try it just before dropping the table
TRUNCATE collections;
ERROR: relation "collections" does not exist
SET search_path TO public;
DROP SCHEMA partitioned_table_replicated CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table partitioned_table_replicated.fkey_test
drop cascades to table partitioned_table_replicated.collections_agg
View File
@ -1,246 +0,0 @@
-- ===================================================================
-- test recursive planning functionality on partitioned tables
-- ===================================================================
CREATE SCHEMA subquery_and_partitioning;
SET search_path TO subquery_and_partitioning, public;
CREATE TABLE users_table_local AS SELECT * FROM users_table;
CREATE TABLE events_table_local AS SELECT * FROM events_table;
CREATE TABLE partitioning_test(id int, value_1 int, time date) PARTITION BY RANGE (time);
ERROR: syntax error at or near "PARTITION"
LINE 1: ...partitioning_test(id int, value_1 int, time date) PARTITION ...
^
-- create its partitions
CREATE TABLE partitioning_test_2017 PARTITION OF partitioning_test FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test_2017 PARTITION OF partitionin...
^
CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test_2010 PARTITION OF partitionin...
^
-- load some data and distribute tables
INSERT INTO partitioning_test VALUES (1, 1, '2017-11-23');
ERROR: relation "partitioning_test" does not exist
LINE 1: INSERT INTO partitioning_test VALUES (1, 1, '2017-11-23');
^
INSERT INTO partitioning_test VALUES (2, 1, '2010-07-07');
ERROR: relation "partitioning_test" does not exist
LINE 1: INSERT INTO partitioning_test VALUES (2, 1, '2010-07-07');
^
INSERT INTO partitioning_test_2017 VALUES (3, 3, '2017-11-22');
ERROR: relation "partitioning_test_2017" does not exist
LINE 1: INSERT INTO partitioning_test_2017 VALUES (3, 3, '2017-11-22...
^
INSERT INTO partitioning_test_2010 VALUES (4, 4, '2010-03-03');
ERROR: relation "partitioning_test_2010" does not exist
LINE 1: INSERT INTO partitioning_test_2010 VALUES (4, 4, '2010-03-03...
^
-- distribute partitioned table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('partitioning_test', 'id');
ERROR: relation "partitioning_test" does not exist
LINE 1: SELECT create_distributed_table('partitioning_test', 'id');
^
SET client_min_messages TO DEBUG1;
-- subplan for partitioned tables
SELECT
id
FROM
(SELECT
DISTINCT partitioning_test.id
FROM
partitioning_test
LIMIT 5
) as foo
ORDER BY 1 DESC;
ERROR: relation "partitioning_test" does not exist
LINE 7: partitioning_test
^
-- final query is router on partitioned tables
SELECT
*
FROM
(SELECT
DISTINCT partitioning_test.id
FROM
partitioning_test
LIMIT 5
) as foo,
(SELECT
DISTINCT partitioning_test.time
FROM
partitioning_test
LIMIT 5
) as bar
WHERE foo.id = date_part('day', bar.time)
ORDER BY 2 DESC, 1;
ERROR: relation "partitioning_test" does not exist
LINE 7: partitioning_test
^
-- final query is real-time
SELECT
*
FROM
(SELECT
DISTINCT partitioning_test.time
FROM
partitioning_test
ORDER BY 1 DESC
LIMIT 5
) as foo,
(
SELECT
DISTINCT partitioning_test.id
FROM
partitioning_test
) as bar
WHERE date_part('day', foo.time) = bar.id
ORDER BY 2 DESC, 1 DESC
LIMIT 3;
ERROR: relation "partitioning_test" does not exist
LINE 7: partitioning_test
^
-- final query is real-time that is joined with partitioned table
SELECT
*
FROM
(SELECT
DISTINCT partitioning_test.time
FROM
partitioning_test
ORDER BY 1 DESC
LIMIT 5
) as foo,
(
SELECT
DISTINCT partitioning_test.id
FROM
partitioning_test
) as bar,
partitioning_test
WHERE date_part('day', foo.time) = bar.id AND partitioning_test.id = bar.id
ORDER BY 2 DESC, 1 DESC
LIMIT 3;
ERROR: relation "partitioning_test" does not exist
LINE 7: partitioning_test
^
-- subquery in WHERE clause
SELECT DISTINCT id
FROM partitioning_test
WHERE
id IN (SELECT DISTINCT date_part('day', time) FROM partitioning_test);
ERROR: relation "partitioning_test" does not exist
LINE 2: FROM partitioning_test
^
-- repartition subquery
SET citus.enable_repartition_joins to ON;
SELECT
count(*)
FROM
(
SELECT DISTINCT p1.value_1 FROM partitioning_test as p1, partitioning_test as p2 WHERE p1.id = p2.value_1
) as foo,
(
SELECT user_id FROM users_table
) as bar
WHERE foo.value_1 = bar.user_id;
ERROR: relation "partitioning_test" does not exist
LINE 5: SELECT DISTINCT p1.value_1 FROM partitioning_test as p1, pa...
^
SET citus.enable_repartition_joins to OFF;
-- subquery, cte, view and non-partitioned tables
CREATE VIEW subquery_and_ctes AS
SELECT
*
FROM
(
WITH cte AS (
WITH local_cte AS (
SELECT * FROM users_table_local
),
dist_cte AS (
SELECT
user_id
FROM
events_table,
(SELECT DISTINCT value_1 FROM partitioning_test OFFSET 0) as foo
WHERE
events_table.user_id = foo.value_1 AND
events_table.user_id IN (SELECT DISTINCT value_1 FROM users_table ORDER BY 1 LIMIT 3)
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
)
SELECT
count(*) as cnt
FROM
cte,
(SELECT
DISTINCT events_table.user_id
FROM
partitioning_test, events_table
WHERE
events_table.user_id = partitioning_test.id AND
event_type IN (1,2,3,4)
ORDER BY 1 DESC LIMIT 5
) as foo
WHERE foo.user_id = cte.user_id
) as foo, users_table WHERE foo.cnt > users_table.value_2;
ERROR: relation "partitioning_test" does not exist
LINE 15: (SELECT DISTINCT value_1 FROM partitioning_test OFFSET 0)...
^
SELECT * FROM subquery_and_ctes
ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC
LIMIT 5;
ERROR: relation "subquery_and_ctes" does not exist
LINE 1: SELECT * FROM subquery_and_ctes
^
-- deep subquery, partitioned and non-partitioned tables together
SELECT count(*)
FROM
(
SELECT avg(min) FROM
(
SELECT min(partitioning_test.value_1) FROM
(
SELECT avg(event_type) as avg_ev_type FROM
(
SELECT
max(value_1) as mx_val_1
FROM (
SELECT
avg(event_type) as avg
FROM
(
SELECT
cnt
FROM
(SELECT count(*) as cnt, value_1 FROM partitioning_test GROUP BY value_1) as level_1, users_table
WHERE
users_table.user_id = level_1.cnt
) as level_2, events_table
WHERE events_table.user_id = level_2.cnt
GROUP BY level_2.cnt
) as level_3, users_table
WHERE user_id = level_3.avg
GROUP BY level_3.avg
) as level_4, events_table
WHERE level_4.mx_val_1 = events_table.user_id
GROUP BY level_4.mx_val_1
) as level_5, partitioning_test
WHERE
level_5.avg_ev_type = partitioning_test.id
GROUP BY
level_5.avg_ev_type
) as level_6, users_table WHERE users_table.user_id = level_6.min
GROUP BY users_table.value_1
) as bar;
ERROR: relation "partitioning_test" does not exist
LINE 20: (SELECT count(*) as cnt, value_1 FROM partitioning_...
^
SET client_min_messages TO DEFAULT;
DROP SCHEMA subquery_and_partitioning CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table users_table_local
drop cascades to table events_table_local
SET search_path TO public;
View File
@ -1,95 +0,0 @@
CREATE SCHEMA with_partitioning;
SET search_path TO with_partitioning, public;
SET citus.shard_replication_factor TO 1;
CREATE TABLE with_partitioning.local_users_2 (user_id int, event_type int);
INSERT INTO local_users_2 VALUES (0, 0), (1, 4), (1, 7), (2, 1), (3, 3), (5, 4), (6, 2), (10, 7);
CREATE TABLE with_partitioning.partitioning_test(id int, time date) PARTITION BY RANGE (time);
ERROR: syntax error at or near "PARTITION"
LINE 1: ...partitioning.partitioning_test(id int, time date) PARTITION ...
^
-- create its partitions
CREATE TABLE with_partitioning.partitioning_test_2017 PARTITION OF partitioning_test FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: ...TE TABLE with_partitioning.partitioning_test_2017 PARTITION ...
^
CREATE TABLE with_partitioning.partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: ...TE TABLE with_partitioning.partitioning_test_2010 PARTITION ...
^
-- load some data and distribute tables
INSERT INTO partitioning_test VALUES (1, '2017-11-23');
ERROR: relation "partitioning_test" does not exist
LINE 1: INSERT INTO partitioning_test VALUES (1, '2017-11-23');
^
INSERT INTO partitioning_test VALUES (2, '2010-07-07');
ERROR: relation "partitioning_test" does not exist
LINE 1: INSERT INTO partitioning_test VALUES (2, '2010-07-07');
^
INSERT INTO partitioning_test_2017 VALUES (3, '2017-11-22');
ERROR: relation "partitioning_test_2017" does not exist
LINE 1: INSERT INTO partitioning_test_2017 VALUES (3, '2017-11-22');
^
INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03');
ERROR: relation "partitioning_test_2010" does not exist
LINE 1: INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03');
^
-- distribute partitioned table
SELECT create_distributed_table('with_partitioning.partitioning_test', 'id');
ERROR: relation "with_partitioning.partitioning_test" does not exist
LINE 1: SELECT create_distributed_table('with_partitioning.partition...
^
-- Join of a CTE on distributed table and then join with a partitioned table
WITH cte AS (
SELECT * FROM users_table
)
SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time ORDER BY 1, 2 LIMIT 3;
ERROR: relation "partitioning_test" does not exist
LINE 4: ...ELECT DISTINCT ON (id) id, cte.time FROM cte join partitioni...
^
-- Join of a CTE on distributed table and then join with a partitioned table hitting on only one partition
WITH cte AS (
SELECT * FROM users_table
)
SELECT DISTINCT ON (id) id, cte.time FROM cte join partitioning_test on cte.time::date=partitioning_test.time WHERE partitioning_test.time >'2017-11-20' ORDER BY 1, 2 LIMIT 3;
ERROR: relation "partitioning_test" does not exist
LINE 4: ...ELECT DISTINCT ON (id) id, cte.time FROM cte join partitioni...
^
-- Join with a distributed table and then join of two CTEs
WITH cte AS (
SELECT id, time FROM partitioning_test
),
cte_2 AS (
SELECT * FROM partitioning_test WHERE id > 2
),
cte_joined AS (
SELECT user_id, cte_2.time FROM users_table join cte_2 on (users_table.time::date = cte_2.time)
),
cte_joined_2 AS (
SELECT user_id, cte_joined.time FROM cte_joined join cte on (cte_joined.time = cte.time)
)
SELECT DISTINCT ON (event_type) event_type, cte_joined_2.user_id FROM events_table join cte_joined_2 on (cte_joined_2.time=events_table.time::date) ORDER BY 1, 2 LIMIT 10 OFFSET 2;
ERROR: relation "partitioning_test" does not exist
LINE 2: SELECT id, time FROM partitioning_test
^
-- Join a partitioned table with a local table (both in CTEs)
-- and then with a distributed table. Finally, join with a
-- partitioned table again
WITH cte AS (
SELECT id, time FROM partitioning_test
),
cte_2 AS (
SELECT * FROM local_users_2
),
cte_joined AS (
SELECT user_id, cte.time FROM cte join cte_2 on (cte.id = cte_2.user_id)
),
cte_joined_2 AS (
SELECT users_table.user_id, cte_joined.time FROM cte_joined join users_table on (cte_joined.time = users_table.time::date)
)
SELECT DISTINCT ON (id) id, cte_joined_2.time FROM cte_joined_2 join partitioning_test on (cte_joined_2.time=partitioning_test.time) ORDER BY 1, 2;
ERROR: relation "partitioning_test" does not exist
LINE 2: SELECT id, time FROM partitioning_test
^
DROP SCHEMA with_partitioning CASCADE;
NOTICE: drop cascades to table local_users_2