Remove Postgres 10 support

pull/3103/head^2
Philip Dubé 2019-10-08 16:43:59 +00:00 committed by Philip Dubé
parent 95633416f7
commit 74cb168205
103 changed files with 671 additions and 24171 deletions

configure (vendored)
View File

@ -2543,7 +2543,7 @@ if test -z "$version_num"; then
as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5
fi
if test "$version_num" != '10' -a "$version_num" != '11' -a "$version_num" != '12'; then
if test "$version_num" != '11' -a "$version_num" != '12'; then
as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5

View File

@ -74,7 +74,7 @@ if test -z "$version_num"; then
AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.])
fi
if test "$version_num" != '10' -a "$version_num" != '11' -a "$version_num" != '12'; then
if test "$version_num" != '11' -a "$version_num" != '12'; then
AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
else
AC_MSG_NOTICE([building against PostgreSQL $version_num])
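Both configure checks above gate the build on the server version; the same gating recurs at compile time throughout the C sources, where pg_config.h defines PG_VERSION_NUM as major * 10000 + minor. A minimal, self-contained sketch of that compile-time gate (the hardcoded version number is hypothetical, standing in for pg_config.h):

#include <stdio.h>

/* pg_config.h provides this in a real build; hardcoded here only to keep
 * the sketch self-contained. 110005 encodes PostgreSQL 11.5. */
#define PG_VERSION_NUM 110005

int main(void)
{
#if PG_VERSION_NUM >= 110000
    puts("compiling the PG11+ code path");
#else
    puts("compiling the pre-11 compatibility path");
#endif
    return 0;
}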

View File

@ -12,8 +12,6 @@
#include "postgres.h"
#if PG_VERSION_NUM >= 110000
#include "catalog/pg_proc.h"
#include "commands/defrem.h"
#include "distributed/citus_ruleutils.h"
@ -207,6 +205,3 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure,
return true;
}
#endif /* PG_VERSION_NUM >= 110000 */

View File

@ -22,9 +22,6 @@
#include "catalog/index.h"
#include "catalog/pg_am.h"
#include "catalog/pg_attribute.h"
#if (PG_VERSION_NUM < 110000)
#include "catalog/pg_constraint_fn.h"
#endif
#include "catalog/pg_enum.h"
#include "catalog/pg_extension.h"
#include "catalog/pg_opclass.h"
@ -1166,9 +1163,9 @@ CreateTruncateTrigger(Oid relationId)
trigger->whenClause = NULL;
trigger->isconstraint = false;
CreateTriggerInternal(trigger, NULL, relationId, InvalidOid, InvalidOid, InvalidOid,
InvalidOid, InvalidOid, NULL,
internal, false);
CreateTrigger(trigger, NULL, relationId, InvalidOid, InvalidOid, InvalidOid,
InvalidOid, InvalidOid, NULL,
internal, false);
}
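CreateTriggerInternal was a compat macro, defined in the version-compat header deleted near the end of this diff, that accepted PG11's full CreateTrigger argument list and silently dropped the parameters PG10's shorter signature lacked; with PG10 gone, the call site invokes CreateTrigger directly. A reduced, self-contained sketch of that argument-dropping technique (all names here are hypothetical):

#include <stdio.h>

/* stand-in for the old, narrower API */
static void
make_widget_v1(const char *name, int flags)
{
    printf("widget %s, flags %d\n", name, flags);
}

/* compat macro: accept the newer call shape, discard what v1 lacks */
#define make_widget_compat(name, flags, owner, when_clause) \
    make_widget_v1((name), (flags))

int main(void)
{
    make_widget_compat("truncate_trigger", 1, "postgres", NULL);
    return 0;
}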

View File

@ -18,9 +18,6 @@
#if (PG_VERSION_NUM >= 120000)
#include "access/genam.h"
#endif
#if (PG_VERSION_NUM < 110000)
#include "catalog/pg_constraint_fn.h"
#endif
#include "catalog/pg_type.h"
#include "distributed/colocation_utils.h"
#include "distributed/commands.h"
@ -151,7 +148,8 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ,
relation->rd_id);
scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidIndexId, true, NULL,
scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidTypidNameIndexId,
true, NULL,
scanKeyCount, scanKey);
heapTuple = systable_getnext(scanDescriptor);
@ -515,7 +513,8 @@ GetTableForeignConstraintCommands(Oid relationId)
pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ,
relationId);
scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidIndexId, true, NULL,
scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidTypidNameIndexId,
true, NULL,
scanKeyCount, scanKey);
heapTuple = systable_getnext(scanDescriptor);
@ -523,11 +522,7 @@ GetTableForeignConstraintCommands(Oid relationId)
{
Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
#if (PG_VERSION_NUM >= 110000)
bool inheritedConstraint = OidIsValid(constraintForm->conparentid);
#else
bool inheritedConstraint = false;
#endif
if (!inheritedConstraint && constraintForm->contype == CONSTRAINT_FOREIGN)
{
@ -571,7 +566,8 @@ HasForeignKeyToReferenceTable(Oid relationId)
pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ,
relationId);
scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidIndexId, true, NULL,
scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidTypidNameIndexId,
true, NULL,
scanKeyCount, scanKey);
heapTuple = systable_getnext(scanDescriptor);
@ -679,7 +675,7 @@ HeapTupleOfForeignConstraintIncludesColumn(HeapTuple heapTuple, Oid relationId,
{
AttrNumber attrNo = DatumGetInt16(columnArray[attrIdx]);
char *colName = get_attname_internal(relationId, attrNo, false);
char *colName = get_attname(relationId, attrNo, false);
if (strncmp(colName, columnName, NAMEDATALEN) == 0)
{
return true;
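Two of the changes above are typical of this commit: the constraint index is referenced under its PG11 name (ConstraintRelidTypidNameIndexId), and the conparentid check no longer needs a version guard, since pg_constraint.conparentid only exists from PG11 onward. On PG10 the old code compiled the flag down to a constant; a self-contained sketch of that absent-feature-defaults-to-constant pattern (the struct and version number are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define PG_VERSION_NUM 110000 /* hypothetical; pg_config.h sets this */

typedef struct { unsigned conparentid; } FormToy;

int main(void)
{
    FormToy form = { .conparentid = 0 };

#if PG_VERSION_NUM >= 110000
    bool inheritedConstraint = (form.conparentid != 0); /* OidIsValid analog */
#else
    /* the feature does not exist before PG11, so default to false */
    bool inheritedConstraint = false;
#endif
    printf("inherited: %d\n", (int) inheritedConstraint);
    return 0;
}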

View File

@ -79,13 +79,8 @@ static void ErrorIfFunctionDependsOnExtension(const ObjectAddress *functionAddre
PG_FUNCTION_INFO_V1(create_distributed_function);
#if PG_VERSION_NUM >= 110000
#define AssertIsFunctionOrProcedure(objtype) \
Assert((objtype) == OBJECT_FUNCTION || (objtype) == OBJECT_PROCEDURE)
#else
#define AssertIsFunctionOrProcedure(objtype) \
Assert(objtype == OBJECT_FUNCTION)
#endif
/*
@ -597,9 +592,7 @@ GetFunctionAlterOwnerCommand(const RegProcedure funcOid)
procOwner = procform->proowner;
#if (PG_VERSION_NUM >= 110000)
isProcedure = procform->prokind == PROKIND_PROCEDURE;
#endif
ReleaseSysCache(proctup);
}
@ -878,12 +871,10 @@ CreateFunctionStmtObjectAddress(CreateFunctionStmt *stmt, bool missing_ok)
ObjectWithArgs *objectWithArgs = NULL;
ListCell *parameterCell = NULL;
#if PG_VERSION_NUM >= 110000
if (stmt->is_procedure)
{
objectType = OBJECT_PROCEDURE;
}
#endif
objectWithArgs = makeNode(ObjectWithArgs);
objectWithArgs->objname = stmt->funcname;
@ -910,10 +901,7 @@ PlanAlterFunctionStmt(AlterFunctionStmt *stmt, const char *queryString)
const ObjectAddress *address = NULL;
List *commands = NIL;
/* AlterFunctionStmt->objtype has only been added since pg11 */
#if PG_VERSION_NUM >= 110000
AssertIsFunctionOrProcedure(stmt->objtype);
#endif
address = GetObjectAddressFromParseTree((Node *) stmt, false);
if (!ShouldPropagateAlterFunction(address))
@ -1249,13 +1237,7 @@ ProcessAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryStr
const ObjectAddress *
AlterFunctionStmtObjectAddress(AlterFunctionStmt *stmt, bool missing_ok)
{
ObjectType objectType = OBJECT_FUNCTION;
#if PG_VERSION_NUM >= 110000
objectType = stmt->objtype;
#endif
return FunctionToObjectAddress(objectType, stmt->func, missing_ok);
return FunctionToObjectAddress(stmt->objtype, stmt->func, missing_ok);
}
@ -1303,7 +1285,7 @@ AlterFunctionSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_o
AssertIsFunctionOrProcedure(stmt->objectType);
objectWithArgs = castNode(ObjectWithArgs, stmt->object);
funcOid = LookupFuncWithArgsCompat(stmt->objectType, objectWithArgs, true);
funcOid = LookupFuncWithArgs(stmt->objectType, objectWithArgs, true);
names = objectWithArgs->objname;
if (funcOid == InvalidOid)
@ -1322,7 +1304,7 @@ AlterFunctionSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_o
* error if the type didn't exist in the first place.
*/
objectWithArgs->objname = newNames;
funcOid = LookupFuncWithArgsCompat(stmt->objectType, objectWithArgs, true);
funcOid = LookupFuncWithArgs(stmt->objectType, objectWithArgs, true);
objectWithArgs->objname = names; /* restore the original names */
/*
@ -1336,8 +1318,8 @@ AlterFunctionSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_o
* has just been created (if possible at all). For safety we assign the
* funcOid.
*/
funcOid = LookupFuncWithArgsCompat(stmt->objectType, objectWithArgs,
missing_ok);
funcOid = LookupFuncWithArgs(stmt->objectType, objectWithArgs,
missing_ok);
}
}
@ -1363,7 +1345,7 @@ FunctionToObjectAddress(ObjectType objectType, ObjectWithArgs *objectWithArgs,
AssertIsFunctionOrProcedure(objectType);
funcOid = LookupFuncWithArgsCompat(objectType, objectWithArgs, missing_ok);
funcOid = LookupFuncWithArgs(objectType, objectWithArgs, missing_ok);
address = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*address, ProcedureRelationId, funcOid);
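With PG10 gone, only the two-alternative form of AssertIsFunctionOrProcedure survives (PG10 had no procedures, so its variant accepted OBJECT_FUNCTION alone). A self-contained restatement of that macro, using the standard assert in place of PostgreSQL's Assert and a stand-in enum for ObjectType:

#include <assert.h>

typedef enum { OBJECT_FUNCTION, OBJECT_PROCEDURE, OBJECT_TABLE } ObjectTypeToy;

#define AssertIsFunctionOrProcedure(objtype) \
    assert((objtype) == OBJECT_FUNCTION || (objtype) == OBJECT_PROCEDURE)

int main(void)
{
    AssertIsFunctionOrProcedure(OBJECT_PROCEDURE); /* passes */
    return 0;
}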

View File

@ -234,7 +234,7 @@ PlanReindexStmt(ReindexStmt *reindexStatement, const char *reindexCommand)
#endif
state.locked_table_oid = InvalidOid;
indOid = RangeVarGetRelidInternal(reindexStatement->relation,
indOid = RangeVarGetRelidExtended(reindexStatement->relation,
lockmode, 0,
RangeVarCallbackForReindexIndex,
&state);
@ -243,7 +243,7 @@ PlanReindexStmt(ReindexStmt *reindexStatement, const char *reindexCommand)
}
else
{
RangeVarGetRelidInternal(reindexStatement->relation, lockmode, 0,
RangeVarGetRelidExtended(reindexStatement->relation, lockmode, 0,
RangeVarCallbackOwnsTable, NULL);
relation = heap_openrv(reindexStatement->relation, NoLock);
@ -349,7 +349,7 @@ PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand)
state.heapOid = InvalidOid;
state.concurrent = dropIndexStatement->concurrent;
indexId = RangeVarGetRelidInternal(rangeVar, lockmode, rvrFlags,
indexId = RangeVarGetRelidExtended(rangeVar, lockmode, rvrFlags,
RangeVarCallbackForDropIndex,
(void *) &state);
@ -654,10 +654,8 @@ RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, voi
*/
expected_relkind = classform->relkind;
#if PG_VERSION_NUM >= 110000
if (expected_relkind == RELKIND_PARTITIONED_INDEX)
expected_relkind = RELKIND_INDEX;
#endif
if (expected_relkind != relkind)
ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
@ -667,7 +665,7 @@ RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, voi
if (!pg_class_ownercheck(relOid, GetUserId()) &&
!pg_namespace_ownercheck(classform->relnamespace, GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, ACLCHECK_OBJECT_INDEX, rel->relname);
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, rel->relname);
}
if (!allowSystemTableMods && IsSystemClass(relOid, classform))
@ -747,11 +745,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, Oid relId, Oid oldRelI
relkind = get_rel_relkind(relId);
if (!relkind)
return;
if (relkind != RELKIND_INDEX
#if PG_VERSION_NUM >= 110000
&& relkind != RELKIND_PARTITIONED_INDEX
#endif
)
if (relkind != RELKIND_INDEX && relkind != RELKIND_PARTITIONED_INDEX)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not an index", relation->relname)));
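RangeVarGetRelidInternal was another shim from the deleted compat header: PG11 replaced RangeVarGetRelidExtended's two booleans (missing_ok, nowait) with a flags bitmask, and the shim reconstructed RVR_MISSING_OK and RVR_NOWAIT for PG10. A self-contained sketch of that bool-to-bitmask adaptation (the function names are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* the flag values PG11 introduced; PG10 took two booleans instead */
#define RVR_MISSING_OK 1
#define RVR_NOWAIT     2

/* stand-in for the old two-boolean entry point */
static int
resolve_relation_v10(bool missing_ok, bool nowait)
{
    return (missing_ok ? 1 : 0) + (nowait ? 2 : 0);
}

/* adapter exposing the new flags-based signature on top of the old one,
 * mirroring the deleted RangeVarGetRelidInternal shim */
static int
resolve_relation_compat(unsigned flags)
{
    bool missing_ok = (flags & RVR_MISSING_OK) != 0;
    bool nowait = (flags & RVR_NOWAIT) != 0;
    return resolve_relation_v10(missing_ok, nowait);
}

int main(void)
{
    printf("%d\n", resolve_relation_compat(RVR_MISSING_OK | RVR_NOWAIT));
    return 0;
}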

View File

@ -121,9 +121,7 @@ PlanAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryString)
return PlanAlterTypeSchemaStmt(stmt, queryString);
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return PlanAlterFunctionSchemaStmt(stmt, queryString);
@ -198,9 +196,7 @@ ProcessAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt, const char *queryStrin
return;
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
ProcessAlterFunctionSchemaStmt(stmt, queryString);

View File

@ -857,7 +857,7 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
}
attributeCount = indexInfo->ii_NumIndexAttrs;
attributeNumberArray = IndexInfoAttributeNumberArray(indexInfo);
attributeNumberArray = indexInfo->ii_IndexAttrNumbers;
for (attributeIndex = 0; attributeIndex < attributeCount; attributeIndex++)
{

View File

@ -211,7 +211,7 @@ multi_ProcessUtility(PlannedStmt *pstmt,
parsetree = ProcessCreateSubscriptionStmt(createSubStmt);
}
#if (PG_VERSION_NUM >= 110000)
if (IsA(parsetree, CallStmt))
{
CallStmt *callStmt = (CallStmt *) parsetree;
@ -253,7 +253,6 @@ multi_ProcessUtility(PlannedStmt *pstmt,
return;
}
#endif
if (IsA(parsetree, DoStmt))
{
@ -426,9 +425,7 @@ multi_ProcessUtility(PlannedStmt *pstmt,
break;
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
ddlJobs = PlanDropFunctionStmt(dropStatement, queryString);
@ -484,9 +481,7 @@ multi_ProcessUtility(PlannedStmt *pstmt,
break;
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
ddlJobs = PlanRenameFunctionStmt(renameStmt, queryString);
@ -843,9 +838,7 @@ PlanAlterOwnerStmt(AlterOwnerStmt *stmt, const char *queryString)
return PlanAlterTypeOwnerStmt(stmt, queryString);
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return PlanAlterFunctionOwnerStmt(stmt, queryString);
@ -871,9 +864,7 @@ PlanAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, const char *queryString
{
switch (stmt->objectType)
{
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return PlanAlterFunctionDependsStmt(stmt, queryString);

View File

@ -44,6 +44,8 @@ static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams,
List *vacuumColumnList);
static StringInfo DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams);
static char * DeparseVacuumColumnNames(List *columnNameList);
static List * VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex);
static List * ExtractVacuumTargetRels(VacuumStmt *vacuumStmt);
static CitusVacuumParams VacuumStmtParams(VacuumStmt *vacstmt);
/*
@ -379,6 +381,40 @@ DeparseVacuumColumnNames(List *columnNameList)
}
/*
* VacuumColumnList returns the list of columns from the relation
* in the vacuum statement at the specified relationIndex.
*/
static List *
VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex)
{
VacuumRelation *vacuumRelation = (VacuumRelation *) list_nth(vacuumStmt->rels,
relationIndex);
return vacuumRelation->va_cols;
}
/*
* ExtractVacuumTargetRels returns the list of target
* relations from the vacuum statement.
*/
static List *
ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
{
List *vacuumList = NIL;
ListCell *vacuumRelationCell = NULL;
foreach(vacuumRelationCell, vacuumStmt->rels)
{
VacuumRelation *vacuumRelation = (VacuumRelation *) lfirst(vacuumRelationCell);
vacuumList = lappend(vacuumList, vacuumRelation->relation);
}
return vacuumList;
}
/*
* VacuumStmtParams returns a CitusVacuumParams based on the supplied VacuumStmt.
*/
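ExtractVacuumTargetRels and VacuumColumnList move into this file from the version-compat header, where their PG11 variants lived. Both lean on PostgreSQL's List machinery (lappend, foreach); a self-contained analog of their accumulate-while-iterating pattern, with a toy list standing in for PG's List:

#include <stdio.h>
#include <stdlib.h>

/* toy singly linked list standing in for PostgreSQL's List */
typedef struct ListCellToy
{
    void *value;
    struct ListCellToy *next;
} ListCellToy;

/* toy lappend: append value and return the (possibly new) head */
static ListCellToy *
lappend_toy(ListCellToy *head, void *value)
{
    ListCellToy *cell = calloc(1, sizeof(ListCellToy));
    cell->value = value;
    if (head == NULL)
        return cell;
    ListCellToy *tail = head;
    while (tail->next != NULL)
        tail = tail->next;
    tail->next = cell;
    return head;
}

int main(void)
{
    char *rels[] = { "t1", "t2", "t3" };
    ListCellToy *targets = NULL;

    /* accumulate while iterating, as ExtractVacuumTargetRels does */
    for (int i = 0; i < 3; i++)
        targets = lappend_toy(targets, rels[i]);

    for (ListCellToy *c = targets; c != NULL; c = c->next)
        puts((char *) c->value);
    return 0;
}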

View File

@ -120,9 +120,7 @@ DeparseDropStmt(DropStmt *stmt)
return DeparseDropTypeStmt(stmt);
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return DeparseDropFunctionStmt(stmt);
@ -188,9 +186,7 @@ DeparseRenameStmt(RenameStmt *stmt)
return DeparseRenameAttributeStmt(stmt);
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return DeparseRenameFunctionStmt(stmt);
@ -243,9 +239,7 @@ DeparseAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt)
return DeparseAlterTypeSchemaStmt(stmt);
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return DeparseAlterFunctionSchemaStmt(stmt);
@ -277,9 +271,7 @@ DeparseAlterOwnerStmt(AlterOwnerStmt *stmt)
return DeparseAlterTypeOwnerStmt(stmt);
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return DeparseAlterFunctionOwnerStmt(stmt);
@ -306,9 +298,7 @@ DeparseAlterObjectDependsStmt(AlterObjectDependsStmt *stmt)
{
switch (stmt->objectType)
{
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return DeparseAlterFunctionDependsStmt(stmt);

View File

@ -86,11 +86,6 @@ AppendAlterFunctionStmt(StringInfo buf, AlterFunctionStmt *stmt)
{
ListCell *actionCell = NULL;
#if (PG_VERSION_NUM < 110000)
appendStringInfo(buf, "ALTER FUNCTION ");
AppendFunctionName(buf, stmt->func, OBJECT_FUNCTION);
#else
if (stmt->objtype == OBJECT_FUNCTION)
{
appendStringInfo(buf, "ALTER FUNCTION ");
@ -101,7 +96,6 @@ AppendAlterFunctionStmt(StringInfo buf, AlterFunctionStmt *stmt)
}
AppendFunctionName(buf, stmt->func, stmt->objtype);
#endif
foreach(actionCell, stmt->actions)
@ -304,11 +298,7 @@ DeparseRenameFunctionStmt(RenameStmt *stmt)
StringInfoData str = { 0 };
initStringInfo(&str);
#if (PG_VERSION_NUM < 110000)
Assert(stmt->renameType == OBJECT_FUNCTION);
#else
Assert(stmt->renameType == OBJECT_FUNCTION || stmt->renameType == OBJECT_PROCEDURE);
#endif
AppendRenameFunctionStmt(&str, stmt);
@ -324,9 +314,6 @@ AppendRenameFunctionStmt(StringInfo buf, RenameStmt *stmt)
{
ObjectWithArgs *func = castNode(ObjectWithArgs, stmt->object);
#if (PG_VERSION_NUM < 110000)
appendStringInfo(buf, "ALTER FUNCTION ");
#else
if (stmt->renameType == OBJECT_FUNCTION)
{
appendStringInfoString(buf, "ALTER FUNCTION ");
@ -335,7 +322,6 @@ AppendRenameFunctionStmt(StringInfo buf, RenameStmt *stmt)
{
appendStringInfoString(buf, "ALTER PROCEDURE ");
}
#endif
AppendFunctionName(buf, func, stmt->renameType);
@ -352,11 +338,7 @@ DeparseAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt)
StringInfoData str = { 0 };
initStringInfo(&str);
#if (PG_VERSION_NUM < 110000)
Assert(stmt->objectType == OBJECT_FUNCTION);
#else
Assert(stmt->objectType == OBJECT_FUNCTION || stmt->objectType == OBJECT_PROCEDURE);
#endif
AppendAlterFunctionSchemaStmt(&str, stmt);
@ -372,9 +354,6 @@ AppendAlterFunctionSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt)
{
ObjectWithArgs *func = castNode(ObjectWithArgs, stmt->object);
#if (PG_VERSION_NUM < 110000)
appendStringInfo(buf, "ALTER FUNCTION ");
#else
if (stmt->objectType == OBJECT_FUNCTION)
{
appendStringInfoString(buf, "ALTER FUNCTION ");
@ -383,7 +362,6 @@ AppendAlterFunctionSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt)
{
appendStringInfoString(buf, "ALTER PROCEDURE ");
}
#endif
AppendFunctionName(buf, func, stmt->objectType);
appendStringInfo(buf, " SET SCHEMA %s;", quote_identifier(stmt->newschema));
@ -399,11 +377,7 @@ DeparseAlterFunctionOwnerStmt(AlterOwnerStmt *stmt)
StringInfoData str = { 0 };
initStringInfo(&str);
#if (PG_VERSION_NUM < 110000)
Assert(stmt->objectType == OBJECT_FUNCTION);
#else
Assert(stmt->objectType == OBJECT_FUNCTION || stmt->objectType == OBJECT_PROCEDURE);
#endif
AppendAlterFunctionOwnerStmt(&str, stmt);
@ -419,9 +393,6 @@ AppendAlterFunctionOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt)
{
ObjectWithArgs *func = castNode(ObjectWithArgs, stmt->object);
#if (PG_VERSION_NUM < 110000)
appendStringInfo(buf, "ALTER FUNCTION ");
#else
if (stmt->objectType == OBJECT_FUNCTION)
{
appendStringInfoString(buf, "ALTER FUNCTION ");
@ -430,7 +401,6 @@ AppendAlterFunctionOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt)
{
appendStringInfoString(buf, "ALTER PROCEDURE ");
}
#endif
AppendFunctionName(buf, func, stmt->objectType);
appendStringInfo(buf, " OWNER TO %s;", RoleSpecString(stmt->newowner));
@ -446,11 +416,7 @@ DeparseAlterFunctionDependsStmt(AlterObjectDependsStmt *stmt)
StringInfoData str = { 0 };
initStringInfo(&str);
#if (PG_VERSION_NUM < 110000)
Assert(stmt->objectType == OBJECT_FUNCTION);
#else
Assert(stmt->objectType == OBJECT_FUNCTION || stmt->objectType == OBJECT_PROCEDURE);
#endif
AppendAlterFunctionDependsStmt(&str, stmt);
@ -466,9 +432,6 @@ AppendAlterFunctionDependsStmt(StringInfo buf, AlterObjectDependsStmt *stmt)
{
ObjectWithArgs *func = castNode(ObjectWithArgs, stmt->object);
#if (PG_VERSION_NUM < 110000)
appendStringInfo(buf, "ALTER FUNCTION ");
#else
if (stmt->objectType == OBJECT_FUNCTION)
{
appendStringInfoString(buf, "ALTER FUNCTION ");
@ -477,7 +440,6 @@ AppendAlterFunctionDependsStmt(StringInfo buf, AlterObjectDependsStmt *stmt)
{
appendStringInfoString(buf, "ALTER PROCEDURE ");
}
#endif
AppendFunctionName(buf, func, stmt->objectType);
appendStringInfo(buf, " DEPENDS ON EXTENSION %s;", strVal(stmt->extname));
@ -493,11 +455,7 @@ DeparseDropFunctionStmt(DropStmt *stmt)
StringInfoData str = { 0 };
initStringInfo(&str);
#if (PG_VERSION_NUM < 110000)
Assert(stmt->removeType == OBJECT_FUNCTION);
#else
Assert(stmt->removeType == OBJECT_FUNCTION || stmt->removeType == OBJECT_PROCEDURE);
#endif
AppendDropFunctionStmt(&str, stmt);
@ -511,9 +469,6 @@ DeparseDropFunctionStmt(DropStmt *stmt)
static void
AppendDropFunctionStmt(StringInfo buf, DropStmt *stmt)
{
#if (PG_VERSION_NUM < 110000)
appendStringInfo(buf, "DROP FUNCTION ");
#else
if (stmt->removeType == OBJECT_FUNCTION)
{
appendStringInfoString(buf, "DROP FUNCTION ");
@ -522,7 +477,6 @@ AppendDropFunctionStmt(StringInfo buf, DropStmt *stmt)
{
appendStringInfoString(buf, "DROP PROCEDURE ");
}
#endif
if (stmt->missing_ok)
{
@ -576,7 +530,7 @@ AppendFunctionName(StringInfo buf, ObjectWithArgs *func, ObjectType objtype)
char *schemaName = NULL;
char *qualifiedFunctionName;
funcid = LookupFuncWithArgsCompat(objtype, func, true);
funcid = LookupFuncWithArgs(objtype, func, true);
proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
if (!HeapTupleIsValid(proctup))
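All of the deparse routines in this file now branch on the statement's object type where the PG10 path hard-coded FUNCTION, and LookupFuncWithArgsCompat gives way to PG11's LookupFuncWithArgs. A self-contained sketch of the keyword dispatch, with a plain char buffer standing in for StringInfo and a stand-in enum for ObjectType:

#include <stdio.h>
#include <string.h>

typedef enum { OBJECT_FUNCTION, OBJECT_PROCEDURE } ObjectTypeToy;

/* pick the keyword from the statement's object type, as the deparse
 * sites above now do unconditionally */
static void
append_drop_prefix(char *buf, size_t buflen, ObjectTypeToy removeType)
{
    const char *kw = (removeType == OBJECT_FUNCTION)
        ? "DROP FUNCTION "
        : "DROP PROCEDURE ";
    strncat(buf, kw, buflen - strlen(buf) - 1);
}

int main(void)
{
    char buf[64] = "";
    append_drop_prefix(buf, sizeof(buf), OBJECT_PROCEDURE);
    printf("%s...;\n", buf); /* DROP PROCEDURE ...; */
    return 0;
}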

View File

@ -143,9 +143,7 @@ RenameStmtObjectAddress(RenameStmt *stmt, bool missing_ok)
return RenameAttributeStmtObjectAddress(stmt, missing_ok);
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return RenameFunctionStmtObjectAddress(stmt, missing_ok);
@ -170,9 +168,7 @@ AlterObjectSchemaStmtObjectAddress(AlterObjectSchemaStmt *stmt, bool missing_ok)
return AlterTypeSchemaStmtObjectAddress(stmt, missing_ok);
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return AlterFunctionSchemaStmtObjectAddress(stmt, missing_ok);
@ -218,9 +214,7 @@ AlterOwnerStmtObjectAddress(AlterOwnerStmt *stmt, bool missing_ok)
return AlterTypeOwnerObjectAddress(stmt, missing_ok);
}
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return AlterFunctionOwnerObjectAddress(stmt, missing_ok);
@ -250,9 +244,7 @@ AlterObjectDependsStmtObjectAddress(AlterObjectDependsStmt *stmt, bool missing_o
{
switch (stmt->objectType)
{
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
case OBJECT_FUNCTION:
{
return AlterFunctionDependsStmtObjectAddress(stmt, missing_ok);

View File

@ -126,12 +126,10 @@ QualifyRenameStmt(RenameStmt *stmt)
}
case OBJECT_FUNCTION:
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
{
QualifyRenameFunctionStmt(stmt);
}
{
QualifyRenameFunctionStmt(stmt);
}
default:
{
@ -199,12 +197,10 @@ QualifyAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt)
}
case OBJECT_FUNCTION:
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
{
QualifyAlterFunctionSchemaStmt(stmt);
}
{
QualifyAlterFunctionSchemaStmt(stmt);
}
default:
{
@ -227,12 +223,10 @@ QualifyAlterOwnerStmt(AlterOwnerStmt *stmt)
}
case OBJECT_FUNCTION:
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
{
QualifyAlterFunctionOwnerStmt(stmt);
}
{
QualifyAlterFunctionOwnerStmt(stmt);
}
default:
{
@ -248,12 +242,10 @@ QualifyAlterObjectDependsStmt(AlterObjectDependsStmt *stmt)
switch (stmt->objectType)
{
case OBJECT_FUNCTION:
#if PG_VERSION_NUM >= 110000
case OBJECT_PROCEDURE:
#endif
{
QualifyAlterFunctionDependsStmt(stmt);
}
{
QualifyAlterFunctionDependsStmt(stmt);
}
default:
{

View File

@ -43,13 +43,7 @@ void QualifyFunctionSchemaName(ObjectWithArgs *func, ObjectType type);
void
QualifyAlterFunctionStmt(AlterFunctionStmt *stmt)
{
ObjectType objtype = OBJECT_FUNCTION;
#if (PG_VERSION_NUM >= 110000)
objtype = stmt->objtype;
#endif
QualifyFunction(stmt->func, objtype);
QualifyFunction(stmt->func, stmt->objtype);
}
@ -61,11 +55,7 @@ QualifyAlterFunctionStmt(AlterFunctionStmt *stmt)
void
QualifyRenameFunctionStmt(RenameStmt *stmt)
{
#if (PG_VERSION_NUM < 110000)
Assert(stmt->renameType == OBJECT_FUNCTION);
#else
Assert(stmt->renameType == OBJECT_FUNCTION || stmt->renameType == OBJECT_PROCEDURE);
#endif
QualifyFunction(castNode(ObjectWithArgs, stmt->object), stmt->renameType);
}
@ -79,11 +69,7 @@ QualifyRenameFunctionStmt(RenameStmt *stmt)
void
QualifyAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt)
{
#if (PG_VERSION_NUM < 110000)
Assert(stmt->objectType == OBJECT_FUNCTION);
#else
Assert(stmt->objectType == OBJECT_FUNCTION || stmt->objectType == OBJECT_PROCEDURE);
#endif
QualifyFunction(castNode(ObjectWithArgs, stmt->object), stmt->objectType);
}
@ -97,11 +83,7 @@ QualifyAlterFunctionSchemaStmt(AlterObjectSchemaStmt *stmt)
void
QualifyAlterFunctionOwnerStmt(AlterOwnerStmt *stmt)
{
#if (PG_VERSION_NUM < 110000)
Assert(stmt->objectType == OBJECT_FUNCTION);
#else
Assert(stmt->objectType == OBJECT_FUNCTION || stmt->objectType == OBJECT_PROCEDURE);
#endif
QualifyFunction(castNode(ObjectWithArgs, stmt->object), stmt->objectType);
}
@ -115,11 +97,7 @@ QualifyAlterFunctionOwnerStmt(AlterOwnerStmt *stmt)
void
QualifyAlterFunctionDependsStmt(AlterObjectDependsStmt *stmt)
{
#if (PG_VERSION_NUM < 110000)
Assert(stmt->objectType == OBJECT_FUNCTION);
#else
Assert(stmt->objectType == OBJECT_FUNCTION || stmt->objectType == OBJECT_PROCEDURE);
#endif
QualifyFunction(castNode(ObjectWithArgs, stmt->object), stmt->objectType);
}
@ -156,7 +134,7 @@ QualifyFunctionSchemaName(ObjectWithArgs *func, ObjectType type)
Oid funcid = InvalidOid;
HeapTuple proctup;
funcid = LookupFuncWithArgsCompat(type, func, true);
funcid = LookupFuncWithArgs(type, func, true);
proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
/*

View File

@ -684,15 +684,9 @@ SortTupleStore(CitusScanState *scanState)
sortKeyIndex++;
}
#if (PG_VERSION_NUM >= 110000)
tuplesortstate =
tuplesort_begin_heap(tupleDescriptor, numberOfSortKeys, sortColIdx, sortOperators,
collations, nullsFirst, work_mem, NULL, false);
#else
tuplesortstate =
tuplesort_begin_heap(tupleDescriptor, numberOfSortKeys, sortColIdx, sortOperators,
collations, nullsFirst, work_mem, false);
#endif
while (true)
{

View File

@ -1362,7 +1362,7 @@ EnsureTablePermissions(Oid relationId, AclMode mode)
if (aclresult != ACLCHECK_OK)
{
aclcheck_error(aclresult, ACLCHECK_OBJECT_TABLE, get_rel_name(relationId));
aclcheck_error(aclresult, OBJECT_TABLE, get_rel_name(relationId));
}
}
@ -1376,7 +1376,7 @@ EnsureTableOwner(Oid relationId)
{
if (!pg_class_ownercheck(relationId, GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, ACLCHECK_OBJECT_TABLE,
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE,
get_rel_name(relationId));
}
}
@ -1391,7 +1391,7 @@ EnsureSchemaOwner(Oid schemaId)
{
if (!pg_namespace_ownercheck(schemaId, GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, ACLCHECK_OBJECT_SCHEMA,
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SCHEMA,
get_namespace_name(schemaId));
}
}
@ -1406,7 +1406,7 @@ EnsureSequenceOwner(Oid sequenceOid)
{
if (!pg_class_ownercheck(sequenceOid, GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, ACLCHECK_OBJECT_SEQUENCE,
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SEQUENCE,
get_rel_name(sequenceOid));
}
}

View File

@ -33,9 +33,6 @@
#include "catalog/namespace.h"
#include "catalog/pg_class.h"
#include "catalog/pg_constraint.h"
#if (PG_VERSION_NUM < 110000)
#include "catalog/pg_constraint_fn.h"
#endif
#include "catalog/pg_index.h"
#include "catalog/pg_type.h"
#include "catalog/pg_namespace.h"

View File

@ -1108,8 +1108,7 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName,
/* fill in the partition column name and shard name in the query. */
partitionColumn = PartitionColumn(relationId, unusedTableId);
partitionColumnName = get_attname_internal(relationId, partitionColumn->varattno,
false);
partitionColumnName = get_attname(relationId, partitionColumn->varattno, false);
appendStringInfo(partitionValueQuery, SHARD_RANGE_QUERY,
partitionColumnName, partitionColumnName, shardName);

View File

@ -276,9 +276,7 @@ ClusterHasDistributedFunctionWithDistArgument(void)
Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);
#if (PG_VERSION_NUM >= 110000)
TupleDesc tupleDescriptor = RelationGetDescr(pgDistObjectRel);
#endif
pgDistObjectScan =
systable_beginscan(pgDistObjectRel, InvalidOid, false, NULL, 0, NULL);
@ -290,16 +288,10 @@ ClusterHasDistributedFunctionWithDistArgument(void)
if (pg_dist_object->classid == ProcedureRelationId)
{
bool distArgumentIsNull = false;
#if (PG_VERSION_NUM >= 110000)
distArgumentIsNull =
heap_attisnull(pgDistObjectTup,
Anum_pg_dist_object_distribution_argument_index,
tupleDescriptor);
#else
distArgumentIsNull =
heap_attisnull(pgDistObjectTup,
Anum_pg_dist_object_distribution_argument_index);
#endif
/* we found one distributed function that has a distribution argument */
if (!distArgumentIsNull)

View File

@ -1740,17 +1740,9 @@ HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams)
/* give hook a chance in case parameter is dynamic */
if (boundParams->paramFetch != NULL)
{
#if (PG_VERSION_NUM >= 110000)
ParamExternData externParamPlaceholder;
externParam = (*boundParams->paramFetch)(boundParams, paramId, false,
&externParamPlaceholder);
#else
externParam = &boundParams->params[paramId - 1];
if (!OidIsValid(externParam->ptype))
{
(*boundParams->paramFetch)(boundParams, paramId);
}
#endif
}
else
{

View File

@ -91,15 +91,6 @@ static void ExplainOneQuery(Query *query, int cursorOptions,
IntoClause *into, ExplainState *es,
const char *queryString, ParamListInfo params,
QueryEnvironment *queryEnv);
#if (PG_VERSION_NUM < 110000)
static void ExplainOpenGroup(const char *objtype, const char *labelname,
bool labeled, ExplainState *es);
static void ExplainCloseGroup(const char *objtype, const char *labelname,
bool labeled, ExplainState *es);
static void ExplainXMLTag(const char *tagname, int flags, ExplainState *es);
static void ExplainJSONLineEnding(ExplainState *es);
static void ExplainYAMLLineStarting(ExplainState *es);
#endif
/*
@ -230,7 +221,7 @@ ExplainJob(Job *job, ExplainState *es)
ExplainOpenGroup("Job", "Job", true, es);
ExplainPropertyIntegerInternal("Task Count", NULL, taskCount, es);
ExplainPropertyInteger("Task Count", NULL, taskCount, es);
if (dependedJobCount > 0)
{
@ -306,8 +297,8 @@ ExplainMapMergeJob(MapMergeJob *mapMergeJob, ExplainState *es)
}
ExplainOpenGroup("MapMergeJob", NULL, true, es);
ExplainPropertyIntegerInternal("Map Task Count", NULL, mapTaskCount, es);
ExplainPropertyIntegerInternal("Merge Task Count", NULL, mergeTaskCount, es);
ExplainPropertyInteger("Map Task Count", NULL, mapTaskCount, es);
ExplainPropertyInteger("Merge Task Count", NULL, mergeTaskCount, es);
if (dependedJobCount > 0)
{
@ -649,13 +640,10 @@ ExplainOneQuery(Query *query, int cursorOptions,
{
/* if an advisor plugin is present, let it manage things */
if (ExplainOneQuery_hook)
#if (PG_VERSION_NUM >= 110000)
{
(*ExplainOneQuery_hook) (query, cursorOptions, into, es,
queryString, params, queryEnv);
#elif (PG_VERSION_NUM >= 100000)
(*ExplainOneQuery_hook) (query, cursorOptions, into, es,
queryString, params);
#endif
}
else
{
PlannedStmt *plan;
@ -675,182 +663,3 @@ ExplainOneQuery(Query *query, int cursorOptions,
&planduration);
}
}
#if (PG_VERSION_NUM < 110000)
/*
* Open a group of related objects.
*
* objtype is the type of the group object, labelname is its label within
* a containing object (if any).
*
* If labeled is true, the group members will be labeled properties,
* while if it's false, they'll be unlabeled objects.
*/
static void
ExplainOpenGroup(const char *objtype, const char *labelname,
bool labeled, ExplainState *es)
{
switch (es->format)
{
case EXPLAIN_FORMAT_TEXT:
/* nothing to do */
break;
case EXPLAIN_FORMAT_XML:
ExplainXMLTag(objtype, X_OPENING, es);
es->indent++;
break;
case EXPLAIN_FORMAT_JSON:
ExplainJSONLineEnding(es);
appendStringInfoSpaces(es->str, 2 * es->indent);
if (labelname)
{
escape_json(es->str, labelname);
appendStringInfoString(es->str, ": ");
}
appendStringInfoChar(es->str, labeled ? '{' : '[');
/*
* In JSON format, the grouping_stack is an integer list. 0 means
* we've emitted nothing at this grouping level, 1 means we've
* emitted something (and so the next item needs a comma). See
* ExplainJSONLineEnding().
*/
es->grouping_stack = lcons_int(0, es->grouping_stack);
es->indent++;
break;
case EXPLAIN_FORMAT_YAML:
/*
* In YAML format, the grouping stack is an integer list. 0 means
* we've emitted nothing at this grouping level AND this grouping
* level is unlabelled and must be marked with "- ". See
* ExplainYAMLLineStarting().
*/
ExplainYAMLLineStarting(es);
if (labelname)
{
appendStringInfo(es->str, "%s: ", labelname);
es->grouping_stack = lcons_int(1, es->grouping_stack);
}
else
{
appendStringInfoString(es->str, "- ");
es->grouping_stack = lcons_int(0, es->grouping_stack);
}
es->indent++;
break;
}
}
/*
* Close a group of related objects.
* Parameters must match the corresponding ExplainOpenGroup call.
*/
static void
ExplainCloseGroup(const char *objtype, const char *labelname,
bool labeled, ExplainState *es)
{
switch (es->format)
{
case EXPLAIN_FORMAT_TEXT:
/* nothing to do */
break;
case EXPLAIN_FORMAT_XML:
es->indent--;
ExplainXMLTag(objtype, X_CLOSING, es);
break;
case EXPLAIN_FORMAT_JSON:
es->indent--;
appendStringInfoChar(es->str, '\n');
appendStringInfoSpaces(es->str, 2 * es->indent);
appendStringInfoChar(es->str, labeled ? '}' : ']');
es->grouping_stack = list_delete_first(es->grouping_stack);
break;
case EXPLAIN_FORMAT_YAML:
es->indent--;
es->grouping_stack = list_delete_first(es->grouping_stack);
break;
}
}
/*
* Emit opening or closing XML tag.
*
* "flags" must contain X_OPENING, X_CLOSING, or X_CLOSE_IMMEDIATE.
* Optionally, OR in X_NOWHITESPACE to suppress the whitespace we'd normally
* add.
*
* XML tag names can't contain white space, so we replace any spaces in
* "tagname" with dashes.
*/
static void
ExplainXMLTag(const char *tagname, int flags, ExplainState *es)
{
const char *s;
if ((flags & X_NOWHITESPACE) == 0)
appendStringInfoSpaces(es->str, 2 * es->indent);
appendStringInfoCharMacro(es->str, '<');
if ((flags & X_CLOSING) != 0)
appendStringInfoCharMacro(es->str, '/');
for (s = tagname; *s; s++)
appendStringInfoCharMacro(es->str, (*s == ' ') ? '-' : *s);
if ((flags & X_CLOSE_IMMEDIATE) != 0)
appendStringInfoString(es->str, " /");
appendStringInfoCharMacro(es->str, '>');
if ((flags & X_NOWHITESPACE) == 0)
appendStringInfoCharMacro(es->str, '\n');
}
/*
* Emit a JSON line ending.
*
* JSON requires a comma after each property but the last. To facilitate this,
* in JSON format, the text emitted for each property begins just prior to the
* preceding line-break (and comma, if applicable).
*/
static void
ExplainJSONLineEnding(ExplainState *es)
{
Assert(es->format == EXPLAIN_FORMAT_JSON);
if (linitial_int(es->grouping_stack) != 0)
appendStringInfoChar(es->str, ',');
else
linitial_int(es->grouping_stack) = 1;
appendStringInfoChar(es->str, '\n');
}
/*
* Indent a YAML line.
*
* YAML lines are ordinarily indented by two spaces per indentation level.
* The text emitted for each property begins just prior to the preceding
* line-break, except for the first property in an unlabelled group, for which
* it begins immediately after the "- " that introduces the group. The first
* property of the group appears on the same line as the opening "- ".
*/
static void
ExplainYAMLLineStarting(ExplainState *es)
{
Assert(es->format == EXPLAIN_FORMAT_YAML);
if (linitial_int(es->grouping_stack) == 0)
{
linitial_int(es->grouping_stack) = 1;
}
else
{
appendStringInfoChar(es->str, '\n');
appendStringInfoSpaces(es->str, es->indent * 2);
}
}
#endif
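The large deleted block is a PG10-only copy of explain.c's private output helpers; the version guard suggests Citus carried them only because PG11 made the server's own versions usable from extensions. For reference, the comma-placement trick at the heart of the deleted ExplainJSONLineEnding, restated as a self-contained sketch:

#include <stdio.h>

int main(void)
{
    const char *props[] = { "\"a\": 1", "\"b\": 2", "\"c\": 3" };
    int emitted = 0; /* plays the role of linitial_int(es->grouping_stack) */

    printf("{");
    for (int i = 0; i < 3; i++)
    {
        /* comma before every property except the first, exactly the
         * invariant ExplainJSONLineEnding maintained per group */
        if (emitted)
            printf(",");
        else
            emitted = 1;
        printf("\n  %s", props[i]);
    }
    printf("\n}\n");
    return 0;
}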

View File

@ -1358,8 +1358,7 @@ ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext)
{
/* this part of code only run for subqueries */
Node *joinClause = eval_const_expressions(NULL, joinQualifiersNode);
joinClause = (Node *) canonicalize_qual_compat((Expr *) joinClause,
false);
joinClause = (Node *) canonicalize_qual((Expr *) joinClause, false);
joinQualifierList = make_ands_implicit((Expr *) joinClause);
}
}
@ -1392,8 +1391,7 @@ ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext)
{
/* this part of code only run for subqueries */
Node *fromClause = eval_const_expressions(NULL, fromQualifiersNode);
fromClause = (Node *) canonicalize_qual_compat((Expr *) fromClause,
false);
fromClause = (Node *) canonicalize_qual((Expr *) fromClause, false);
fromQualifierList = make_ands_implicit((Expr *) fromClause);
}

View File

@ -4375,7 +4375,7 @@ ColumnName(Var *column, List *rangeTableList)
else if (rangeTableKind == CITUS_RTE_RELATION)
{
Oid relationId = rangeTableEntry->relid;
columnName = get_attname_internal(relationId, columnNumber, false);
columnName = get_attname(relationId, columnNumber, false);
}
Assert(columnName != NULL);

View File

@ -29,9 +29,6 @@
#include "catalog/namespace.h"
#include "catalog/pg_class.h"
#include "catalog/pg_constraint.h"
#if (PG_VERSION_NUM < 110000)
#include "catalog/pg_constraint_fn.h"
#endif
#include "distributed/commands.h"
#include "distributed/metadata_cache.h"
#include "distributed/relay_utility.h"
@ -318,7 +315,7 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
{
GrantStmt *grantStmt = (GrantStmt *) parseTree;
if (grantStmt->targtype == ACL_TARGET_OBJECT &&
grantStmt->objtype == RELATION_OBJECT_TYPE)
grantStmt->objtype == OBJECT_TABLE)
{
ListCell *lc;

View File

@ -78,9 +78,7 @@ StartLockAcquireHelperBackgroundWorker(int backendToHelp, int32 lock_cooldown)
snprintf(worker.bgw_name, BGW_MAXLEN,
"Citus Lock Acquire Helper: %d/%u",
backendToHelp, MyDatabaseId);
#if PG_VERSION_NUM >= 110000
snprintf(worker.bgw_type, BGW_MAXLEN, "citus_lock_aqcuire");
#endif
worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
worker.bgw_start_time = BgWorkerStart_RecoveryFinished;

View File

@ -694,8 +694,6 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
deparse_index_columns(buffer, indexStmt->indexParams, deparseContext);
appendStringInfoString(buffer, ") ");
#if PG_VERSION_NUM >= 110000
/* column/expressions for INCLUDE list */
if (indexStmt->indexIncludingParams != NIL)
{
@ -703,7 +701,6 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
deparse_index_columns(buffer, indexStmt->indexIncludingParams, deparseContext);
appendStringInfoChar(buffer, ')');
}
#endif
AppendStorageParametersToString(buffer, indexStmt->options);

View File

@ -206,7 +206,7 @@ ColumnNameToColumn(Oid relationId, char *columnNodeString)
columnNumber, relationName)));
}
columnName = get_attname_internal(relationId, column->varattno, false);
columnName = get_attname(relationId, column->varattno, false);
if (columnName == NULL)
{
char *relationName = get_rel_name(relationId);

View File

@ -13,9 +13,6 @@
#include "catalog/partition.h"
#include "catalog/pg_class.h"
#include "catalog/pg_inherits.h"
#if (PG_VERSION_NUM < 110000)
#include "catalog/pg_constraint_fn.h"
#endif
#include "distributed/citus_ruleutils.h"
#include "distributed/colocation_utils.h"
#include "distributed/master_metadata_utility.h"

View File

@ -839,7 +839,7 @@ lock_relation_if_exists(PG_FUNCTION_ARGS)
relation = makeRangeVarFromNameList(relationNameList);
/* lock the relation with the lock mode */
relationId = RangeVarGetRelidInternal(relation, lockMode, RVR_MISSING_OK,
relationId = RangeVarGetRelidExtended(relation, lockMode, RVR_MISSING_OK,
CitusRangeVarCallbackForLockTable,
(void *) &lockMode);
relationExists = OidIsValid(relationId);
@ -879,13 +879,8 @@ CitusRangeVarCallbackForLockTable(const RangeVar *rangeVar, Oid relationId,
aclResult = CitusLockTableAclCheck(relationId, lockmode, GetUserId());
if (aclResult != ACLCHECK_OK)
{
#if (PG_VERSION_NUM >= 110000)
aclcheck_error(aclResult, get_relkind_objtype(get_rel_relkind(relationId)),
rangeVar->relname);
#else
aclcheck_error(aclResult, ACL_KIND_CLASS, rangeVar->relname);
#endif
}
}

File diff suppressed because it is too large

View File

@ -382,7 +382,7 @@ RemoveJobSchema(StringInfo schemaName)
bool permissionsOK = pg_namespace_ownercheck(schemaId, GetUserId());
if (!permissionsOK)
{
aclcheck_error(ACLCHECK_NOT_OWNER, ACLCHECK_OBJECT_SCHEMA, schemaName->data);
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SCHEMA, schemaName->data);
}
schemaObject.classId = NamespaceRelationId;

View File

@ -23,11 +23,8 @@
/* cluster.c - forward declarations */
extern List * PlanClusterStmt(ClusterStmt *clusterStmt, const char *clusterCommand);
#if PG_VERSION_NUM >= 110000
/* call.c */
extern bool CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *dest);
#endif /* PG_VERSION_NUM >= 110000 */
/* extension.c - forward declarations */
extern bool IsCitusExtensionStmt(Node *parsetree);

View File

@ -16,260 +16,10 @@
#include "catalog/namespace.h"
#include "nodes/parsenodes.h"
#include "parser/parse_func.h"
#if (PG_VERSION_NUM >= 120000)
#include "optimizer/optimizer.h"
#endif
#if (PG_VERSION_NUM < 110000)
#include "access/hash.h"
#include "storage/fd.h"
#include "optimizer/prep.h"
#include "postmaster/bgworker.h"
#include "utils/memutils.h"
#include "funcapi.h"
/* PostgreSQL 11 splits hash procs into "standard" and "extended" */
#define HASHSTANDARD_PROC HASHPROC
/* following functions are renamed in PG11 */
#define PreventInTransactionBlock PreventTransactionChain
#define DatumGetJsonbP(d) DatumGetJsonb(d)
#define RequireTransactionBlock RequireTransactionChain
/* following defines also exist for PG11 */
#define RELATION_OBJECT_TYPE ACL_OBJECT_RELATION
#define IndexInfoAttributeNumberArray(indexinfo) (indexinfo->ii_KeyAttrNumbers)
/* CreateTrigger api is changed in PG11 */
#define CreateTriggerInternal(stmt, queryString, relOid, refRelOid, constraintOid, \
indexOid, funcoid, parentTriggerOid, whenClause, isInternal, \
in_partition) \
CreateTrigger(stmt, queryString, relOid, refRelOid, constraintOid, indexOid, \
isInternal)
#define get_attname_internal(relationId, columnNumber, false) \
get_attname(relationId, columnNumber)
#define BackgroundWorkerInitializeConnectionByOid(dboid, useroid, flags) \
BackgroundWorkerInitializeConnectionByOid(dboid, useroid)
#define AtEOXact_Files(isCommit) \
AtEOXact_Files()
#define ACLCHECK_OBJECT_TABLE ACL_KIND_CLASS
#define ACLCHECK_OBJECT_SCHEMA ACL_KIND_NAMESPACE
#define ACLCHECK_OBJECT_INDEX ACL_KIND_CLASS
#define ACLCHECK_OBJECT_SEQUENCE ACL_KIND_CLASS
static inline int
BasicOpenFilePerm(FileName fileName, int fileFlags, int fileMode)
{
return BasicOpenFile(fileName, fileFlags, fileMode);
}
static inline File
PathNameOpenFilePerm(FileName fileName, int fileFlags, int fileMode)
{
return PathNameOpenFile(fileName, fileFlags, fileMode);
}
static inline MemoryContext
AllocSetContextCreateExtended(MemoryContext parent, const char *name, Size minContextSize,
Size initBlockSize, Size maxBlockSize)
{
return AllocSetContextCreate(parent, name, minContextSize, initBlockSize,
maxBlockSize);
}
static inline void
ExplainPropertyIntegerInternal(const char *qlabel, const char *unit, int64 value,
ExplainState *es)
{
return ExplainPropertyInteger(qlabel, value, es);
}
static inline List *
ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
{
List *vacuumList = NIL;
if (vacuumStmt->relation != NULL)
{
vacuumList = list_make1(vacuumStmt->relation);
}
return vacuumList;
}
static inline List *
VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex)
{
Assert(relationIndex == 0);
return vacuumStmt->va_cols;
}
#define RVR_MISSING_OK 1
#define RVR_NOWAIT 2
static inline Oid
RangeVarGetRelidInternal(const RangeVar *relation, LOCKMODE lockmode, uint32 flags,
RangeVarGetRelidCallback callback, void *callback_arg)
{
bool missingOK = ((flags & RVR_MISSING_OK) != 0);
bool noWait = ((flags & RVR_NOWAIT) != 0);
return RangeVarGetRelidExtended(relation, lockmode, missingOK, noWait,
callback, callback_arg);
}
static inline Expr *
canonicalize_qual_compat(Expr *qual, bool is_check)
{
return canonicalize_qual(qual);
}
/*
* A convenient wrapper around get_expr_result_type() that was added in PG11.
*
* Note that this function ignores the second parameter and behaves
* slightly differently than the PG11 version:
*
* 1. The original function throws an error if the noError flag is not set; we
* ignore that flag here and return NULL in that case.
* 2. TYPEFUNC_COMPOSITE_DOMAIN was introduced in PG11, so references to that
* macro are removed.
*/
static inline TupleDesc
get_expr_result_tupdesc(Node *expr, bool noError)
{
TupleDesc tupleDesc;
TypeFuncClass functypclass;
functypclass = get_expr_result_type(expr, NULL, &tupleDesc);
if (functypclass == TYPEFUNC_COMPOSITE)
{
return tupleDesc;
}
return NULL;
}
/* following compat function and macro should be removed when we drop support for PG10 */
static inline Oid
LookupFuncWithArgsCompat(ObjectType objtype, ObjectWithArgs *func, bool noError)
{
if (objtype == OBJECT_FUNCTION)
{
return LookupFuncWithArgs(func, noError);
}
else if (objtype == OBJECT_AGGREGATE)
{
return LookupAggWithArgs(func, noError);
}
return InvalidOid;
}
#endif
#if (PG_VERSION_NUM >= 110000)
#include "optimizer/prep.h"
/* following macros should be removed when we drop support for PG10 and below */
#define RELATION_OBJECT_TYPE OBJECT_TABLE
#define IndexInfoAttributeNumberArray(indexinfo) (indexinfo->ii_IndexAttrNumbers)
#define CreateTriggerInternal CreateTrigger
#define get_attname_internal get_attname
#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE
#define ACLCHECK_OBJECT_SCHEMA OBJECT_SCHEMA
#define ACLCHECK_OBJECT_INDEX OBJECT_INDEX
#define ACLCHECK_OBJECT_SEQUENCE OBJECT_SEQUENCE
#define ConstraintRelidIndexId ConstraintRelidTypidNameIndexId
static inline void
ExplainPropertyIntegerInternal(const char *qlabel, const char *unit, int64 value,
ExplainState *es)
{
return ExplainPropertyInteger(qlabel, unit, value, es);
}
static inline Expr *
canonicalize_qual_compat(Expr *qual, bool is_check)
{
return canonicalize_qual(qual, is_check);
}
/*
* ExtractVacuumTargetRels returns the list of target
* relations from the vacuum statement.
*/
static inline List *
ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
{
List *vacuumList = NIL;
ListCell *vacuumRelationCell = NULL;
foreach(vacuumRelationCell, vacuumStmt->rels)
{
VacuumRelation *vacuumRelation = (VacuumRelation *) lfirst(vacuumRelationCell);
vacuumList = lappend(vacuumList, vacuumRelation->relation);
}
return vacuumList;
}
/*
* VacuumColumnList returns the list of columns from the relation
* in the vacuum statement at the specified relationIndex.
*/
static inline List *
VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex)
{
VacuumRelation *vacuumRelation = (VacuumRelation *) list_nth(vacuumStmt->rels,
relationIndex);
return vacuumRelation->va_cols;
}
static inline Oid
RangeVarGetRelidInternal(const RangeVar *relation, LOCKMODE lockmode, uint32 flags,
RangeVarGetRelidCallback callback, void *callback_arg)
{
return RangeVarGetRelidExtended(relation, lockmode, flags, callback, callback_arg);
}
/* following compat function and macro should be removed when we drop support for PG10 */
static inline Oid
LookupFuncWithArgsCompat(ObjectType objtype, ObjectWithArgs *func, bool noError)
{
return LookupFuncWithArgs(objtype, func, noError);
}
#endif
#if PG_VERSION_NUM >= 120000
#define MakeSingleTupleTableSlotCompat MakeSingleTupleTableSlot
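Most of what this header loses falls into two buckets: adapters that reshape arguments (see the earlier sketches) and pure renames that expose PG11's symbol names on a PG10 server, such as PreventInTransactionBlock mapping to PreventTransactionChain. A self-contained sketch of the rename bucket (the Toy suffix marks stand-ins for the real PostgreSQL symbols; the version number is hypothetical):

#include <stdio.h>

#define PG_VERSION_NUM 100013 /* hypothetical PG10 build */

/* the function under its old, PG10-era name */
static void
PreventTransactionChainToy(const char *cmd)
{
    printf("%s cannot run inside a transaction block\n", cmd);
}

#if PG_VERSION_NUM < 110000
/* a pure rename: make PG11's name compile on a PG10 server; on PG11+
 * the real symbol exists natively and no macro is needed */
#define PreventInTransactionBlockToy PreventTransactionChainToy
#endif

int main(void)
{
    PreventInTransactionBlockToy("VACUUM");
    return 0;
}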

View File

@ -1,10 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 11 AS server_verion_eleven_and_above
\gset
\if :server_verion_eleven_and_above
\else
\q
\endif
SET citus.next_shard_id TO 20030000;
CREATE USER procedureuser;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes

View File

@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 11 AS server_verion_eleven_and_above
\gset
\if :server_verion_eleven_and_above
\else
\q

View File

@ -1,392 +0,0 @@
SET citus.next_shard_id TO 20010000;
CREATE USER typeuser;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
SELECT run_command_on_workers($$CREATE USER typeuser;$$);
run_command_on_workers
-----------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(2 rows)
CREATE SCHEMA type_tests AUTHORIZATION typeuser;
CREATE SCHEMA type_tests2 AUTHORIZATION typeuser; -- to test creation in a specific schema and moving to schema
SET search_path TO type_tests;
SET citus.shard_count TO 4;
-- single statement transactions with a simple type used in a table
CREATE TYPE tc1 AS (a int, b int);
CREATE TABLE t1 (a int PRIMARY KEY, b tc1);
SELECT create_distributed_table('t1','a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t1 VALUES (1, (2,3)::tc1);
SELECT * FROM t1;
a | b
---+-------
1 | (2,3)
(1 row)
ALTER TYPE tc1 RENAME TO tc1_newname;
INSERT INTO t1 VALUES (3, (4,5)::tc1_newname); -- insert with a cast would fail if the rename didn't propagate
ALTER TYPE tc1_newname SET SCHEMA type_tests2;
INSERT INTO t1 VALUES (6, (7,8)::type_tests2.tc1_newname); -- insert with a cast would fail if the schema change didn't propagate
-- single statement transactions with an enum used in a table
CREATE TYPE te1 AS ENUM ('one', 'two', 'three');
CREATE TABLE t2 (a int PRIMARY KEY, b te1);
SELECT create_distributed_table('t2','a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t2 VALUES (1, 'two');
SELECT * FROM t2;
a | b
---+-----
1 | two
(1 row)
-- rename enum; subsequent operations on the type would fail if the rename was not propagated
ALTER TYPE te1 RENAME TO te1_newname;
-- add an extra value to the enum and use in table
ALTER TYPE te1_newname ADD VALUE 'four';
UPDATE t2 SET b = 'four';
SELECT * FROM t2;
a | b
---+------
1 | four
(1 row)
-- change the schema of the type and use the new fully qualified name in an insert
ALTER TYPE te1_newname SET SCHEMA type_tests2;
INSERT INTO t2 VALUES (3, 'three'::type_tests2.te1_newname);
-- transaction block with simple type
BEGIN;
CREATE TYPE tc2 AS (a int, b int);
CREATE TABLE t3 (a int PRIMARY KEY, b tc2);
SELECT create_distributed_table('t3','a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t3 VALUES (4, (5,6)::tc2);
SELECT * FROM t3;
a | b
---+-------
4 | (5,6)
(1 row)
COMMIT;
-- transaction block with simple type
BEGIN;
CREATE TYPE te2 AS ENUM ('yes', 'no');
CREATE TABLE t4 (a int PRIMARY KEY, b te2);
SELECT create_distributed_table('t4','a');
create_distributed_table
--------------------------
(1 row)
INSERT INTO t4 VALUES (1, 'yes');
SELECT * FROM t4;
a | b
---+-----
1 | yes
(1 row)
-- ALTER TYPE ... ADD VALUE does not work in transactions
COMMIT;
-- verify order of enum labels
SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;
string_agg
------------
yes,no
(1 row)
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;$$);
run_command_on_workers
------------------------------
(localhost,57637,t,"yes,no")
(localhost,57638,t,"yes,no")
(2 rows)
-- test some combinations of types without ddl propagation; this will prevent the workers
-- from having those types created. They are created just-in-time on table distribution
SET citus.enable_ddl_propagation TO off;
CREATE TYPE tc3 AS (a int, b int);
CREATE TYPE tc4 AS (a int, b tc3[]);
CREATE TYPE tc5 AS (a int, b tc4);
CREATE TYPE te3 AS ENUM ('a','b');
RESET citus.enable_ddl_propagation;
CREATE TABLE t5 (a int PRIMARY KEY, b tc5[], c te3);
SELECT create_distributed_table('t5','a');
create_distributed_table
--------------------------
(1 row)
-- test adding an attribute to a type and a column to a table both for a non-distributed type
SET citus.enable_ddl_propagation TO off;
CREATE TYPE te4 AS ENUM ('c','d');
CREATE TYPE tc6 AS (a int, b int);
CREATE TYPE tc6c AS (a int, b int);
RESET citus.enable_ddl_propagation;
-- types need to be fully qualified because of the search_path which is not supported by ALTER TYPE ... ADD COLUMN
ALTER TABLE t5 ADD COLUMN d type_tests.te4;
ALTER TABLE t5 ADD COLUMN e type_tests.tc6;
ALTER TYPE tc6 ADD ATTRIBUTE c tc6c;
-- last two values are only there if the above commands succeeded
INSERT INTO t5 VALUES (1, NULL, 'a', 'd', (1,2,(4,5)::tc6c)::tc6);
-- test renaming an attribute of a distributed type and read it by its new name to verify propagation
ALTER TYPE tc6 RENAME ATTRIBUTE b TO d;
SELECT (e::tc6).d FROM t5 ORDER BY 1;
d
---
2
(1 row)
-- change owner of supported types and check ownership on remote server
ALTER TYPE te4 OWNER TO typeuser;
SELECT typname, usename FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;
typname | usename
---------+----------
te4 | typeuser
(1 row)
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;$$);
run_command_on_workers
--------------------------------------
(localhost,57637,t,"(te4,typeuser)")
(localhost,57638,t,"(te4,typeuser)")
(2 rows)
ALTER TYPE tc6 OWNER TO typeuser;
SELECT typname, usename FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;
typname | usename
---------+----------
tc6 | typeuser
(1 row)
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;$$);
run_command_on_workers
--------------------------------------
(localhost,57637,t,"(tc6,typeuser)")
(localhost,57638,t,"(tc6,typeuser)")
(2 rows)
-- create a type as a different user
SET ROLE typeuser;
-- create directly on the worker
CREATE TYPE tc7 AS (a int, b int);
CREATE TYPE te5 AS ENUM ('a','b','c');
-- cascade to the worker when table gets created
SET citus.enable_ddl_propagation TO off;
CREATE TYPE tc8 AS (a int, b int);
CREATE TYPE te6 AS ENUM ('a','b','c');
RESET citus.enable_ddl_propagation;
CREATE TABLE t6 (a int, b tc8, c te6);
SELECT create_distributed_table('t6', 'a');
create_distributed_table
--------------------------
(1 row)
RESET ROLE;
-- test ownership of all types
SELECT typname, usename FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;
typname | usename
---------+----------
tc7 | typeuser
(1 row)
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;$$);
run_command_on_workers
--------------------------------------
(localhost,57637,t,"(tc7,typeuser)")
(localhost,57638,t,"(tc7,typeuser)")
(2 rows)
SELECT typname, usename FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;
typname | usename
---------+----------
te5 | typeuser
(1 row)
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;$$);
run_command_on_workers
--------------------------------------
(localhost,57637,t,"(te5,typeuser)")
(localhost,57638,t,"(te5,typeuser)")
(2 rows)
SELECT typname, usename FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;
typname | usename
---------+----------
tc8 | typeuser
(1 row)
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;$$);
run_command_on_workers
--------------------------------------
(localhost,57637,t,"(tc8,typeuser)")
(localhost,57638,t,"(tc8,typeuser)")
(2 rows)
SELECT typname, usename FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;
typname | usename
---------+----------
te6 | typeuser
(1 row)
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;$$);
run_command_on_workers
--------------------------------------
(localhost,57637,t,"(te6,typeuser)")
(localhost,57638,t,"(te6,typeuser)")
(2 rows)
-- deleting the enum cascade will remove the type from the table and the workers
DROP TYPE te3 CASCADE;
NOTICE: drop cascades to table t5 column c
-- DROP multiple types at once
DROP TYPE tc3, tc4, tc5 CASCADE;
NOTICE: drop cascades to table t5 column b
-- test if the types are deleted
SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;
typname
---------
(0 rows)
SELECT run_command_on_workers($$SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;$$);
run_command_on_workers
------------------------
(localhost,57637,t,"")
(localhost,57638,t,"")
(2 rows)
-- make sure attribute names are quoted correctly; the absence of errors indicates types are propagated correctly
CREATE TYPE tc9 AS ("field-with-dashes" text COLLATE "POSIX");
ALTER TYPE tc9 ADD ATTRIBUTE "some-more" int, ADD ATTRIBUTE normal int;
ALTER TYPE tc9 RENAME ATTRIBUTE normal TO "not-so-normal";
-- test alter statements for non-distributed types; if they were propagated they would
-- error, preventing us from changing them
SET citus.enable_ddl_propagation TO off;
CREATE TYPE non_distributed_composite_type AS (a int, b int);
CREATE TYPE non_distributed_enum_type AS ENUM ('a', 'c');
SET citus.enable_ddl_propagation TO on;
ALTER TYPE non_distributed_composite_type ADD ATTRIBUTE c int;
ALTER TYPE non_distributed_composite_type RENAME ATTRIBUTE c TO d;
ALTER TYPE non_distributed_composite_type ALTER ATTRIBUTE d SET DATA TYPE text COLLATE "POSIX" CASCADE;
ALTER TYPE non_distributed_composite_type DROP ATTRIBUTE d;
ALTER TYPE non_distributed_composite_type OWNER TO typeuser;
ALTER TYPE non_distributed_composite_type RENAME TO non_distributed_composite_type_renamed;
ALTER TYPE non_distributed_composite_type_renamed RENAME TO non_distributed_composite_type;
ALTER TYPE non_distributed_composite_type SET SCHEMA type_tests2;
ALTER TYPE type_tests2.non_distributed_composite_type SET SCHEMA type_tests;
ALTER TYPE non_distributed_enum_type OWNER TO typeuser;
ALTER TYPE non_distributed_enum_type RENAME TO non_distributed_enum_type_renamed;
ALTER TYPE non_distributed_enum_type_renamed RENAME TO non_distributed_enum_type;
ALTER TYPE non_distributed_enum_type SET SCHEMA type_tests2;
ALTER TYPE type_tests2.non_distributed_enum_type SET SCHEMA type_tests;
ALTER TYPE non_distributed_enum_type ADD VALUE 'b' BEFORE 'c';
ALTER TYPE non_distributed_enum_type ADD VALUE 'd' AFTER 'c';
ALTER TYPE non_distributed_enum_type RENAME VALUE 'd' TO 'something-with-quotes''andstuff';
-- test all forms of alter statements on distributed types
CREATE TYPE distributed_composite_type AS (a int, b int);
CREATE TYPE distributed_enum_type AS ENUM ('a', 'c');
-- enforce distribution of types in every case
CREATE TABLE type_proc (a int, b distributed_composite_type, c distributed_enum_type);
SELECT create_distributed_table('type_proc','a');
create_distributed_table
--------------------------
(1 row)
DROP TABLE type_proc;
ALTER TYPE distributed_composite_type ADD ATTRIBUTE c int;
ALTER TYPE distributed_composite_type RENAME ATTRIBUTE c TO d;
ALTER TYPE distributed_composite_type ALTER ATTRIBUTE d SET DATA TYPE text COLLATE "POSIX" CASCADE;
ALTER TYPE distributed_composite_type DROP ATTRIBUTE d;
ALTER TYPE distributed_composite_type OWNER TO typeuser;
ALTER TYPE distributed_composite_type RENAME TO distributed_composite_type_renamed;
ALTER TYPE distributed_composite_type_renamed RENAME TO distributed_composite_type;
ALTER TYPE distributed_composite_type SET SCHEMA type_tests2;
ALTER TYPE type_tests2.distributed_composite_type SET SCHEMA type_tests;
ALTER TYPE distributed_enum_type OWNER TO typeuser;
ALTER TYPE distributed_enum_type RENAME TO distributed_enum_type_renamed;
ALTER TYPE distributed_enum_type_renamed RENAME TO distributed_enum_type;
ALTER TYPE distributed_enum_type SET SCHEMA type_tests2;
ALTER TYPE type_tests2.distributed_enum_type SET SCHEMA type_tests;
ALTER TYPE distributed_enum_type ADD VALUE 'b' BEFORE 'c';
ALTER TYPE distributed_enum_type ADD VALUE 'd' AFTER 'c';
ALTER TYPE distributed_enum_type RENAME VALUE 'd' TO 'something-with-quotes''andstuff';
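One way to spot-check that the quoted label round-tripped is to read it back from pg_enum; a minimal sketch using the names from the test above:
-- expected labels: a, b, c, something-with-quotes'andstuff
SELECT enumlabel
FROM pg_enum e
JOIN pg_type t ON t.oid = e.enumtypid
WHERE t.typname = 'distributed_enum_type'
ORDER BY e.enumsortorder;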
-- make sure types are not distributed by default when feature flag is turned off
SET citus.enable_create_type_propagation TO off;
CREATE TYPE feature_flag_composite_type AS (a int, b int);
CREATE TYPE feature_flag_enum_type AS ENUM ('a', 'b');
-- verify types do not exist on workers
SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');
count
-------
2
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
run_command_on_workers
------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
-- verify they are still distributed when required
CREATE TABLE feature_flag_table (a int PRIMARY KEY, b feature_flag_composite_type, c feature_flag_enum_type);
SELECT create_distributed_table('feature_flag_table','a');
create_distributed_table
--------------------------
(1 row)
SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');
count
-------
2
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
run_command_on_workers
------------------------
(localhost,57637,t,2)
(localhost,57638,t,2)
(2 rows)
RESET citus.enable_create_type_propagation;
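To restate the behavior shown above as a self-contained sketch (the type and table names here are hypothetical): even when the flag is off at CREATE TYPE time, distributing a table that depends on the type forces it onto the workers.
SET citus.enable_create_type_propagation TO off;
CREATE TYPE another_flag_type AS ENUM ('x', 'y');
CREATE TABLE another_flag_table (a int PRIMARY KEY, b another_flag_type);
SELECT create_distributed_table('another_flag_table', 'a');  -- propagates the type anyway
RESET citus.enable_create_type_propagation;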
-- clear objects
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA type_tests CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$);
run_command_on_workers
-----------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
DROP SCHEMA type_tests2 CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$);
run_command_on_workers
-----------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)
DROP USER typeuser;
SELECT run_command_on_workers($$DROP USER typeuser;$$);
run_command_on_workers
---------------------------------
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(2 rows)

@ -1,14 +1,6 @@
-- We have different output files for the executor. This is because
-- we don't mark transactions with ANALYZE as critical anymore, and
-- get WARNINGs instead of ERRORs.
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
(1 row)
SET citus.next_shard_id TO 12000000;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
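The version check removed in this hunk relied on psql's \gset to branch expected output by server version; a minimal sketch of that pattern (variable names illustrative):
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int AS server_major \gset
SELECT :server_major >= 11 AS at_least_pg11;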

@ -1,138 +0,0 @@
-- We have different output files for the executor. The executor
-- with PG10 behaves like non-executor PG11, and with PG11 it
-- behaves like non-executor PG10.
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
f
(1 row)
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
-----------
(1 row)
SET citus.shard_count = 1;
SET citus.shard_replication_factor = 2; -- one shard per worker
SET citus.multi_shard_commit_protocol TO '1pc';
CREATE TABLE vacuum_test (key int, value int);
SELECT create_distributed_table('vacuum_test', 'key');
create_distributed_table
--------------------------
(1 row)
SELECT citus.clear_network_traffic();
clear_network_traffic
-----------------------
(1 row)
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()');
mitmproxy
-----------
(1 row)
VACUUM vacuum_test;
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:9060
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()');
mitmproxy
-----------
(1 row)
ANALYZE vacuum_test;
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:9060
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
mitmproxy
-----------
(1 row)
ANALYZE vacuum_test;
WARNING: connection not open
CONTEXT: while executing command on localhost:9060
WARNING: failed to commit transaction on localhost:9060
WARNING: connection not open
CONTEXT: while executing command on localhost:9060
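This kill-the-COMMIT pattern is the core failure-injection technique in this file; a condensed sketch, with statements exactly as they appear above:
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
ANALYZE vacuum_test;  -- degrades to WARNINGs because ANALYZE is no longer transaction-critical
SELECT citus.mitmproxy('conn.allow()');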
-- ANALYZE transactions being critical is an open question, see #2430
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass
);
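The UPDATE above manually re-marks placements as healthy after the failed commit; a sketch of the complementary check, assuming Citus' shardstate convention (1 = finalized/healthy, 3 = inactive):
SELECT shardid, nodename, nodeport, shardstate
FROM pg_dist_shard_placement
WHERE shardstate <> 1;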
-- the same tests with cancel
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')');
mitmproxy
-----------
(1 row)
VACUUM vacuum_test;
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')');
mitmproxy
-----------
(1 row)
ANALYZE vacuum_test;
ERROR: canceling statement due to user request
-- cancel during COMMIT should be ignored
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')');
mitmproxy
-----------
(1 row)
ANALYZE vacuum_test;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
-----------
(1 row)
CREATE TABLE other_vacuum_test (key int, value int);
SELECT create_distributed_table('other_vacuum_test', 'key');
create_distributed_table
--------------------------
(1 row)
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()');
mitmproxy
-----------
(1 row)
VACUUM vacuum_test, other_vacuum_test;
ERROR: syntax error at or near ","
LINE 1: VACUUM vacuum_test, other_vacuum_test;
^
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')');
mitmproxy
-----------
(1 row)
VACUUM vacuum_test, other_vacuum_test;
ERROR: syntax error at or near ","
LINE 1: VACUUM vacuum_test, other_vacuum_test;
^
-- ==== Clean up, we're done here ====
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
-----------
(1 row)
DROP TABLE vacuum_test, other_vacuum_test;
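For context on the syntax errors above: a relation list after VACUUM only parses on PostgreSQL 11 and later, which is why this PG10-specific expected file can be deleted. Illustration:
VACUUM vacuum_test;                     -- accepted on PG 10 and 11+
VACUUM vacuum_test, other_vacuum_test;  -- syntax error on PG 10, accepted on 11+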

@ -1,14 +1,6 @@
-- We have different output files for the executor. This is because
-- we don't mark transactions with ANALYZE as critical anymore, and
-- get WARNINGs instead of ERRORs.
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
f
(1 row)
SET citus.next_shard_id TO 12000000;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy

@ -1,14 +1,6 @@
-- We have different output files for the executor. This is because
-- we don't mark transactions with ANALYZE as critical anymore, and
-- get WARNINGs instead of ERRORs.
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
(1 row)
SET citus.next_shard_id TO 12000000;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy

@ -1,15 +1,8 @@
--
--
-- Tests multiple commands in transactions where
-- there is foreign key relation between reference
-- tables and distributed tables
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
(1 row)
CREATE SCHEMA test_fkey_to_ref_in_tx;
SET search_path TO 'test_fkey_to_ref_in_tx';
SET citus.next_shard_id TO 2380000;
@ -81,7 +74,7 @@ BEGIN;
ROLLBACK;
-- case 1.2: SELECT to a reference table is followed by a multiple router SELECTs to a distributed table
BEGIN;
SELECT count(*) FROM reference_table;
count
-------
@ -113,7 +106,7 @@ BEGIN;
(1 row)
ROLLBACK;
BEGIN;
SELECT count(*) FROM transitive_reference_table;
count
-------
@ -190,7 +183,7 @@ BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE id = 18;
ROLLBACK;
-- case 1.5: SELECT to a reference table is followed by a DDL that touches fkey column
BEGIN;
SELECT count(*) FROM reference_table;
count
-------
@ -202,7 +195,7 @@ DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
BEGIN;
SELECT count(*) FROM transitive_reference_table;
count
-------
@ -215,7 +208,7 @@ DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_tabl
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
-- case 1.6: SELECT to a reference table is followed by an unrelated DDL
BEGIN;
SELECT count(*) FROM reference_table;
count
-------
@ -226,7 +219,7 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: cannot execute parallel DDL on relation "on_update_fkey_table" after SELECT command on reference relation "reference_table" because there is a foreign key between them and "reference_table" has been accessed in this transaction
ROLLBACK;
BEGIN;
SELECT count(*) FROM transitive_reference_table;
count
-------
@ -237,7 +230,7 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: cannot execute parallel DDL on relation "on_update_fkey_table" after SELECT command on reference relation "transitive_reference_table" because there is a foreign key between them and "transitive_reference_table" has been accessed in this transaction
ROLLBACK;
-- case 1.7.1: SELECT to a reference table is followed by a DDL that is on
-- the foreign key column
BEGIN;
SELECT count(*) FROM reference_table;
@ -261,9 +254,9 @@ BEGIN;
SET LOCAL client_min_messages TO ERROR;
ALTER TABLE on_update_fkey_table DROP COLUMN value_1 CASCADE;
ROLLBACK;
-- case 1.7.2: SELECT to a reference table is followed by a DDL that is on
-- the foreign key column after a parallel query has been executed
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
count
-------
@ -281,7 +274,7 @@ ERROR: cannot modify table "on_update_fkey_table" because there was a parallel
DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency.
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK;
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
count
-------
@ -299,9 +292,9 @@ ERROR: cannot modify table "on_update_fkey_table" because there was a parallel
DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency.
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK;
-- case 1.7.3: SELECT to a reference table is followed by a DDL that is not on
-- the foreign key column, and a parallel query has already been executed
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
count
-------
@ -319,7 +312,7 @@ ERROR: cannot execute parallel DDL on relation "on_update_fkey_table" after SEL
DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency.
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK;
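The HINT above gives the standard escape hatch; a minimal sketch of a transaction that follows it (table names are from the test, the DDL line is illustrative):
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT count(*) FROM unrelated_dist_table;
SELECT count(*) FROM reference_table;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
ROLLBACK;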
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
count
-------
@ -1026,7 +1019,7 @@ ROLLBACK;
-- an unrelated update followed by update on the reference table and update
-- on the cascading distributed table
-- note that the UPDATE on the reference table will try to set the execution
-- mode to sequential, which will fail since there are already open
-- parallel connections
BEGIN;
UPDATE unrelated_dist_table SET value_1 = 15;
@ -1060,7 +1053,7 @@ ROLLBACK;
-- already executed a parallel query
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
------------------------
@ -1089,7 +1082,7 @@ ROLLBACK;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
------------------------
@ -1117,7 +1110,6 @@ ROLLBACK;
-- parallel connection via create_distributed_table(), later
-- adding foreign key to reference table fails
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
@ -1144,7 +1136,6 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
COMMIT;
-- same test with the above on sequential mode should work fine
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
@ -1165,7 +1156,7 @@ BEGIN;
SET LOCAL client_min_messages TO ERROR;
DROP TABLE test_table_1, test_table_2;
COMMIT;
-- similar test with the above, but this time the order of
-- create_distributed_table and create_reference_table is
-- changed
BEGIN;
@ -1246,7 +1237,6 @@ ROLLBACK;
-- make sure that we cannot create hash distributed tables with
-- foreign keys to reference tables when they have data in it
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
@ -1270,7 +1260,6 @@ COMMIT;
-- the same test with above in sequential mode would still not work
-- since COPY cannot be executed in sequential mode
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
@ -1286,17 +1275,15 @@ NOTICE: Copying data from local table...
SELECT create_distributed_table('test_table_2', 'id');
ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty
HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty.
-- make sure that the output isn't too verbose
SET LOCAL client_min_messages TO ERROR;
ERROR: current transaction is aborted, commands ignored until end of transaction block
DROP TABLE test_table_2, test_table_1;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
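Per the HINT, the workable pattern is to distribute the tables while they are still empty and load data afterwards; a minimal sketch (the next block in the test exercises the same idea):
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
SELECT create_distributed_table('test_table_2', 'id');
COMMIT;
-- load outside the sequential transaction
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;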
-- we should be able to execute any DML/DDL/SELECT after we've
-- switched to sequential via create_distributed_table
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
SELECT create_reference_table('test_table_1');
@ -1354,11 +1341,11 @@ SELECT create_distributed_table('distributed_table', 'id');
(1 row)
ALTER TABLE
distributed_table
ADD CONSTRAINT
fkey_delete FOREIGN KEY(value_1)
REFERENCES
reference_table(id) ON DELETE CASCADE;
INSERT INTO reference_table SELECT i FROM generate_series(0, 10) i;
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
@ -1370,7 +1357,7 @@ DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- this query returns 100 rows in Postgres, but not in Citus
-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
WITH t1 AS (DELETE FROM reference_table RETURNING id)
DELETE FROM distributed_table USING t1 WHERE value_1 = t1.id RETURNING *;
DEBUG: generating subplan 170_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
DEBUG: Plan 170 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.distributed_table USING (SELECT intermediate_result.id FROM read_intermediate_result('170_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) RETURNING distributed_table.id, distributed_table.value_1, t1.id
@ -1391,7 +1378,7 @@ DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- this query returns 100 rows in Postgres, but not in Citus
-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
WITH t1 AS (DELETE FROM reference_table RETURNING id)
SELECT count(*) FROM distributed_table, t1 WHERE value_1 = t1.id;
DEBUG: generating subplan 174_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
DEBUG: Plan 174 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('174_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id)
@ -1402,7 +1389,7 @@ DETAIL: Reference relation "reference_table" is modified, which might lead to d
0
(1 row)
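A plausible reading of why Citus returns zero rows here (per the linked discussion): the CTE runs first as a separate subplan, and its ON DELETE CASCADE empties the matching rows in distributed_table before the outer query executes. A plain-PostgreSQL sketch of that execution order, run against the same schema on a single node (not runnable as-is on the distributed tables):
-- step 1: snapshot the ids, then delete (the cascade empties distributed_table)
CREATE TEMP TABLE t1 AS SELECT id FROM reference_table;
DELETE FROM reference_table;
-- step 2: the join now matches nothing
SELECT count(*) FROM distributed_table, t1 WHERE value_1 = t1.id;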
-- this query should fail since we first do a parallel access to a distributed table
-- with t1, and then access to t2
WITH t1 AS (DELETE FROM distributed_table RETURNING id),
t2 AS (DELETE FROM reference_table RETURNING id)
@ -1423,7 +1410,6 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m
-- finally, make sure that we can execute the same queries
-- in the sequential mode
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
WITH t1 AS (DELETE FROM distributed_table RETURNING id),
t2 AS (DELETE FROM reference_table RETURNING id)
@ -1438,7 +1424,6 @@ DEBUG: Plan 181 query after replacing subqueries and CTEs: SELECT count(*) AS c
ROLLBACK;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
WITH t1 AS (DELETE FROM distributed_table RETURNING id)
DELETE FROM reference_table RETURNING id;

@ -1,13 +1,6 @@
--
-- FOREIGN_KEY_TO_REFERENCE_TABLE
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
version_above_nine
--------------------
t
(1 row)
CREATE SCHEMA fkey_reference_table;
SET search_path TO 'fkey_reference_table';
SET citus.shard_replication_factor TO 1;
@ -15,12 +8,12 @@ SET citus.shard_count TO 8;
SET citus.next_shard_id TO 7000000;
SET citus.next_placement_id TO 7000000;
CREATE TYPE foreign_details AS (name text, relid text, refd_relid text);
CREATE VIEW table_fkeys_in_workers AS
SELECT
(json_populate_record(NULL::foreign_details,
json_array_elements_text((run_command_on_workers( $$
SELECT
COALESCE(json_agg(row_to_json(d)), '[]'::json)
FROM
(
SELECT
@ -405,7 +398,7 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A
(8 rows)
DROP TABLE referencing_table;
-- foreign keys are supported either between distributed tables including the
-- distribution column or from distributed tables to reference tables.
CREATE TABLE referencing_table(id int, ref_id int);
SELECT create_distributed_table('referencing_table', 'ref_id', 'append');
@ -568,7 +561,7 @@ SELECT count(*) FROM referencing_table WHERE ref_id = 1;
DROP TABLE referencing_table;
DROP TABLE referenced_table;
-- foreign key as composite key
CREATE TYPE fkey_reference_table.composite AS (key1 int, key2 int);
CREATE TABLE referenced_table(test_column composite, PRIMARY KEY(test_column));
CREATE TABLE referencing_table(id int, referencing_composite composite);
SELECT create_reference_table('referenced_table');
@ -597,7 +590,7 @@ DROP TABLE referenced_table CASCADE;
NOTICE: drop cascades to constraint fkey_ref on table referencing_table
DROP TABLE referencing_table CASCADE;
-- In the following test, we'll use a SERIAL column as the referenced column
-- in the foreign constraint. We'll first show that an insert on a non-serial
-- column successfully inserts into the serial and referenced column.
-- Accordingly, the inserts into the referencing table which references to the
-- serial column will be successful.
@ -629,9 +622,9 @@ DROP TABLE referenced_table CASCADE;
NOTICE: drop cascades to constraint fkey_ref on table referencing_table
DROP TABLE referencing_table CASCADE;
-- In the following test, we'll use a SERIAL column as the referencing column
-- in the foreign constraint. We'll first show that the values that exist
-- in the referenced tables are successfully generated by the serial column
-- and inserted into the distributed table. However, if the values that are generated
-- by serial column do not exist on the referenced table, the query fails.
CREATE TABLE referenced_table(test_column int PRIMARY KEY, test_column2 int);
CREATE TABLE referencing_table(id int, ref_id SERIAL);
@ -653,16 +646,16 @@ INSERT INTO referenced_table SELECT x,x FROM generate_series(1,1000) AS f(x);
INSERT INTO referencing_table(id) SELECT x FROM generate_series(1,1000) AS f(x);
-- Fails for non existing value inserts (serial is already incremented)
INSERT INTO referencing_table(id) SELECT x FROM generate_series(1,10) AS f(x);
ERROR: insert or update on table "referencing_table_7000195" violates foreign key constraint "fkey_ref_7000195"
DETAIL: Key (ref_id)=(1009) is not present in table "referenced_table_7000187".
ERROR: insert or update on table "referencing_table_7000190" violates foreign key constraint "fkey_ref_7000190"
DETAIL: Key (ref_id)=(1004) is not present in table "referenced_table_7000187".
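The parenthetical above ("serial is already incremented") is standard sequence behavior: failed inserts still consume sequence values. A hypothetical local illustration:
CREATE TABLE serial_demo (id serial PRIMARY KEY, val int CHECK (val < 3));
INSERT INTO serial_demo(val) VALUES (1), (2);  -- ids 1, 2
INSERT INTO serial_demo(val) VALUES (5);       -- fails, but id 3 is consumed
INSERT INTO serial_demo(val) VALUES (2);       -- gets id 4, not 3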
DROP TABLE referenced_table CASCADE;
NOTICE: drop cascades to constraint fkey_ref on table referencing_table
DROP TABLE referencing_table CASCADE;
-- In the following test, we'll use a SERIAL column as the referencing column
-- and referenced columns in a foreign constraint. We'll first show that
-- inserts into the referenced column successfully generate and insert
-- data into the serial column. Then, we will successfully insert the same amount
-- of data into the referencing table. However, if the values that are generated
-- by the serial column do not exist in the referenced table, the query fails.
CREATE TABLE referenced_table(test_column SERIAL PRIMARY KEY, test_column2 int);
CREATE TABLE referencing_table(id int, ref_id SERIAL);
@ -684,8 +677,8 @@ INSERT INTO referenced_table(test_column2) SELECT x FROM generate_series(1,1000)
INSERT INTO referencing_table(id) SELECT x FROM generate_series(1,1000) AS f(x);
-- Fails for non existing value inserts (serial is already incremented)
INSERT INTO referencing_table(id) SELECT x FROM generate_series(1,10) AS f(x);
ERROR: insert or update on table "referencing_table_7000200" violates foreign key constraint "fkey_ref_7000200"
DETAIL: Key (ref_id)=(1003) is not present in table "referenced_table_7000196".
ERROR: insert or update on table "referencing_table_7000199" violates foreign key constraint "fkey_ref_7000199"
DETAIL: Key (ref_id)=(1004) is not present in table "referenced_table_7000196".
DROP TABLE referenced_table CASCADE;
NOTICE: drop cascades to constraint fkey_ref on table referencing_table
DROP TABLE referencing_table CASCADE;
@ -713,7 +706,7 @@ INSERT INTO referencing_table SELECT x,(random()*1000)::int FROM generate_series
DROP TABLE referenced_table CASCADE;
NOTICE: drop cascades to constraint fkey_ref on table referencing_table
DROP TABLE referencing_table CASCADE;
-- In the following tests, we create a foreign constraint with
-- ON UPDATE CASCADE and see if it works properly with cascading upsert
CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column));
CREATE TABLE referencing_table(id int, ref_id int DEFAULT -1);
@ -771,11 +764,11 @@ COMMIT;
DROP TABLE referenced_table CASCADE;
NOTICE: drop cascades to constraint referencing_table_ref_id_fkey on table referencing_table
DROP TABLE referencing_table CASCADE;
-- Chained references
-- In the following test, we create foreign keys from one column in a distributed
-- table to two reference tables. We expect to see that even if the data exists in
-- one reference table, it is not going to be inserted into the referencing table
-- because the key is missing from the other table. Data can only be inserted into
-- the referencing table if it exists in both referenced tables.
-- Additionally, delete or update in one referenced table should cascade properly.
CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column));
@ -826,16 +819,16 @@ INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(0,1000) AS f(x);
INSERT INTO referenced_table2 SELECT x, x+1 FROM generate_series(500,1500) AS f(x);
-- should fail
INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,1500) AS f(x);
ERROR: insert or update on table "referencing_table_7000229" violates foreign key constraint "foreign_key_2_7000229"
DETAIL: Key (id)=(0) is not present in table "referenced_table2_7000225".
ERROR: insert or update on table "referencing_table_7000230" violates foreign key constraint "foreign_key_2_7000230"
DETAIL: Key (id)=(28) is not present in table "referenced_table2_7000225".
-- should fail
INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,400) AS f(x);
ERROR: insert or update on table "referencing_table_7000229" violates foreign key constraint "foreign_key_2_7000229"
DETAIL: Key (id)=(0) is not present in table "referenced_table2_7000225".
ERROR: insert or update on table "referencing_table_7000226" violates foreign key constraint "foreign_key_2_7000226"
DETAIL: Key (id)=(1) is not present in table "referenced_table2_7000225".
-- should fail
INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(1000,1400) AS f(x);
ERROR: insert or update on table "referencing_table_7000231" violates foreign key constraint "fkey_ref_7000231"
DETAIL: Key (id)=(1001) is not present in table "referenced_table_7000224".
ERROR: insert or update on table "referencing_table_7000228" violates foreign key constraint "fkey_ref_7000228"
DETAIL: Key (id)=(1015) is not present in table "referenced_table_7000224".
-- should succeed
INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(600,900) AS f(x);
SELECT count(*) FROM referencing_table;
@ -898,10 +891,10 @@ DROP TABLE referenced_table2 CASCADE;
NOTICE: drop cascades to constraint referencing_table_id_fkey1 on table referencing_table
DROP TABLE referencing_table CASCADE;
-- In the following test, we create foreign keys from two columns in a distributed
-- table to two reference tables separately. We expect to see that even if the data
-- exists in one reference table for one column, it is not going to be inserted into
-- the referencing table because the other constraint doesn't hold. Data can only
-- be inserted into the referencing table if both columns exist in the respective
-- columns of the referenced tables.
-- Additionally, delete or update in one referenced table should cascade properly.
CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column));
@ -954,13 +947,13 @@ INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(0,1000) AS f(x);
INSERT INTO referenced_table2 SELECT x, x+1 FROM generate_series(500,1500) AS f(x);
-- should fail
INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,1500) AS f(x);
ERROR: insert or update on table "referencing_table_7000249" violates foreign key constraint "foreign_key_2_7000249"
ERROR: insert or update on table "referencing_table_7000246" violates foreign key constraint "foreign_key_2_7000246"
-- should fail
INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,400) AS f(x);
ERROR: insert or update on table "referencing_table_7000249" violates foreign key constraint "foreign_key_2_7000249"
ERROR: insert or update on table "referencing_table_7000246" violates foreign key constraint "foreign_key_2_7000246"
-- should fail
INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(1000,1400) AS f(x);
ERROR: insert or update on table "referencing_table_7000251" violates foreign key constraint "fkey_ref_7000251"
ERROR: insert or update on table "referencing_table_7000248" violates foreign key constraint "fkey_ref_7000248"
-- should succeed
INSERT INTO referencing_table SELECT x, x+501 FROM generate_series(0,1000) AS f(x);
SELECT count(*) FROM referencing_table;
@ -1026,8 +1019,8 @@ NOTICE: drop cascades to constraint foreign_key_2 on table referencing_table
DROP TABLE referencing_table CASCADE;
\set VERBOSITY default
-- two distributed tables reference one reference table and,
-- at the same time, distributed table 2 references
-- distributed table 1. Thus, we have a triangular relationship:
-- distributed table 1 has a foreign key from the distribution column to reference table
-- distributed table 2 has a foreign key from a non-distribution column to reference table
-- distributed table 2 has a foreign key to distributed table 1 on the distribution column
@ -1092,14 +1085,14 @@ SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' A
INSERT INTO referenced_table SELECT x, x+1 FROM generate_series(0,1000) AS f(x);
-- should fail
INSERT INTO referencing_table2 SELECT x, x+1 FROM generate_series(0,100) AS f(x);
ERROR: insert or update on table "referencing_table2_7000276" violates foreign key constraint "fkey_ref_to_dist_7000276"
DETAIL: Key (id)=(0) is not present in table "referencing_table_7000268".
ERROR: insert or update on table "referencing_table2_7000273" violates foreign key constraint "fkey_ref_to_dist_7000273"
DETAIL: Key (id)=(1) is not present in table "referencing_table_7000265".
-- should succeed
INSERT INTO referencing_table SELECT x, x+1 FROM generate_series(0,400) AS f(x);
-- should fail
INSERT INTO referencing_table2 SELECT x, x+1 FROM generate_series(200,500) AS f(x);
ERROR: insert or update on table "referencing_table2_7000274" violates foreign key constraint "fkey_ref_to_dist_7000274"
DETAIL: Key (id)=(403) is not present in table "referencing_table_7000266".
ERROR: insert or update on table "referencing_table2_7000273" violates foreign key constraint "fkey_ref_to_dist_7000273"
DETAIL: Key (id)=(401) is not present in table "referencing_table_7000265".
-- should succeed
INSERT INTO referencing_table2 SELECT x, x+1 FROM generate_series(0,300) AS f(x);
DELETE FROM referenced_table WHERE test_column < 200;
@ -1167,7 +1160,7 @@ DROP TABLE referencing_table CASCADE;
NOTICE: drop cascades to constraint referencing_table2_id_fkey on table referencing_table2
DROP TABLE referencing_table2 CASCADE;
\set VERBOSITY default
-- In this test we have a chained relationship in the form of:
-- distributed table (referencing_referencing_table) has a foreign key with two columns
-- to another distributed table (referencing_table)
-- referencing_table has another foreign key with 2 columns to referenced_table.
@ -1231,11 +1224,11 @@ DROP TABLE referencing_table CASCADE;
NOTICE: drop cascades to constraint referencing_referencing_table_id_fkey on table referencing_referencing_table
DROP TABLE referencing_referencing_table;
-- test if create_distributed_table works in transactions with some edge cases
-- the following checks if create_distributed_table works on foreign keys when
-- one of them is a self-referencing table of multiple distributed tables
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
------------------------
@ -1308,7 +1301,6 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
ROLLBACK;
-- make sure that we fail if we need parallel data load
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
@ -1633,7 +1625,7 @@ SELECT create_distributed_table('test_table_2', 'id');
(1 row)
INSERT INTO test_table_1 VALUES (1),(2),(3);
INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3);
TRUNCATE test_table_1 CASCADE;
NOTICE: truncate cascades to table "test_table_2"
SELECT * FROM test_table_2;
@ -1658,7 +1650,7 @@ SELECT create_distributed_table('test_table_2', 'id');
(1 row)
INSERT INTO test_table_1 VALUES (1),(2),(3);
INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3);
BEGIN;
TRUNCATE test_table_1 CASCADE;
NOTICE: truncate cascades to table "test_table_2"
@ -1712,7 +1704,7 @@ SELECT create_distributed_table('test_table_2', 'id');
(1 row)
INSERT INTO test_table_1 VALUES (1),(2),(3);
INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3);
TRUNCATE test_table_2 CASCADE;
SELECT * FROM test_table_2;
id | value_1
@ -1744,7 +1736,7 @@ SELECT create_distributed_table('test_table_2', 'id');
(1 row)
INSERT INTO test_table_1 VALUES (1),(2),(3);
INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3);
BEGIN;
TRUNCATE test_table_2 CASCADE;
COMMIT;
@ -1763,7 +1755,7 @@ SELECT * FROM test_table_1;
DROP TABLE test_table_1, test_table_2;
-- check if we successfully set multi_shard_modify_mode to sequential after sequentially running DDLs
-- in transaction since the upcoming DDLs need to run sequentially.
CREATE TABLE test_table_1(id int PRIMARY KEY);
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
CREATE TABLE test_table_3(id int PRIMARY KEY, value_1 int);

File diff suppressed because it is too large

@ -1,400 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-drop: DROP TABLE drop_hash; <waiting ...>
step s1-commit: COMMIT;
step s2-drop: <... completed>
error in steps s1-commit s2-drop: ERROR: table "drop_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
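For readers unfamiliar with this output format: each permutation interleaves steps from two sessions, and "<waiting ...>" marks a step blocked on a lock until the other session commits. A hypothetical excerpt of the kind of isolation .spec file that drives it:
session "s1"
step "s1-begin"  { BEGIN; }
step "s1-drop"   { DROP TABLE drop_hash; }
step "s1-commit" { COMMIT; }

session "s2"
step "s2-drop" { DROP TABLE drop_hash; }

permutation "s1-begin" "s1-drop" "s2-drop" "s1-commit"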
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-create-index: <... completed>
error in steps s1-commit s2-ddl-create-index: ERROR: relation "drop_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
restore_isolation_tester_func
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-drop s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-ddl-drop-index: DROP INDEX drop_hash_index; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-drop-index: <... completed>
error in steps s1-commit s2-ddl-drop-index: ERROR: index "drop_hash_index" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s1-drop s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY drop_hash_index ON drop_hash(id); <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-create-index-concurrently: <... completed>
error in steps s1-commit s2-ddl-create-index-concurrently: ERROR: relation "drop_hash" does not exist
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-add-column: <... completed>
error in steps s1-commit s2-ddl-add-column: ERROR: relation "drop_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
restore_isolation_tester_func
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-drop s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-ddl-drop-column: ALTER TABLE drop_hash DROP new_column; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-drop-column: <... completed>
error in steps s1-commit s2-ddl-drop-column: ERROR: relation "drop_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-ddl-rename-column: ALTER TABLE drop_hash RENAME data TO new_column; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-rename-column: <... completed>
error in steps s1-commit s2-ddl-rename-column: ERROR: relation "drop_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-table-size s1-commit s2-commit s1-select-count
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-table-size: SELECT citus_total_relation_size('drop_hash'); <waiting ...>
step s1-commit: COMMIT;
step s2-table-size: <... completed>
error in steps s1-commit s2-table-size: ERROR: could not compute table size: relation does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DROP FROM drop_hash;');
ERROR: syntax error at or near "FROM"
step s1-commit: COMMIT;
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-drop s2-distribute-table s1-commit s2-commit s1-select-count
create_distributed_table
step s1-drop: DROP TABLE drop_hash;
step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE drop_hash;
step s2-distribute-table: SELECT create_distributed_table('drop_hash', 'id'); <waiting ...>
step s1-commit: COMMIT;
step s2-distribute-table: <... completed>
error in steps s1-commit s2-distribute-table: ERROR: could not create distributed table: relation does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id);
step s2-drop: DROP TABLE drop_hash; <waiting ...>
step s1-commit: COMMIT;
step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
restore_isolation_tester_func
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-ddl-drop-index: DROP INDEX drop_hash_index;
step s2-drop: DROP TABLE drop_hash; <waiting ...>
step s1-commit: COMMIT;
step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0;
step s2-drop: DROP TABLE drop_hash; <waiting ...>
step s1-commit: COMMIT;
step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
restore_isolation_tester_func
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-ddl-drop-column: ALTER TABLE drop_hash DROP new_column;
step s2-drop: DROP TABLE drop_hash; <waiting ...>
step s1-commit: COMMIT;
step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE drop_hash RENAME data TO new_column;
step s2-drop: DROP TABLE drop_hash; <waiting ...>
step s1-commit: COMMIT;
step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('drop_hash');
citus_total_relation_size
57344
step s2-drop: DROP TABLE drop_hash;
step s1-commit: COMMIT;
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DROP FROM drop_hash;');
ERROR: syntax error at or near "FROM"
step s2-drop: DROP TABLE drop_hash;
step s1-commit: COMMIT;
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
step s1-drop: DROP TABLE drop_hash;
step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('drop_hash', 'id');
create_distributed_table
step s2-drop: DROP TABLE drop_hash; <waiting ...>
step s1-commit: COMMIT;
step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func

@ -1,570 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
15
starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-router-select: SELECT * FROM partitioned_copy WHERE id = 1;
id data int_data
1 b 1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-real-time-select: SELECT * FROM partitioned_copy ORDER BY 1, 2;
id data int_data
0 a 0
1 b 1
2 c 2
3 d 3
4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-task-tracker-select:
SET citus.task_executor_type TO "task-tracker";
SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
id data int_data id data int_data
0 a 0 0 a 0
1 b 1 1 b 1
2 c 2 2 c 2
3 d 3 3 d 3
4 e 4 4 e 4
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-insert: INSERT INTO partitioned_copy VALUES(0, 'k', 0);
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
11
starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-insert-select: INSERT INTO partitioned_copy SELECT * FROM partitioned_copy;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
15
starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-update: UPDATE partitioned_copy SET data = 'l' WHERE id = 0;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-delete: DELETE FROM partitioned_copy WHERE id = 1;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
9
starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-truncate: TRUNCATE partitioned_copy; <waiting ...>
step s1-commit: COMMIT;
step s2-truncate: <... completed>
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
0
starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-drop: DROP TABLE partitioned_copy; <waiting ...>
step s1-commit: COMMIT;
step s2-drop: <... completed>
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
ERROR: relation "partitioned_copy" does not exist
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-add-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s1-copy-additional-column: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV;
step s2-ddl-drop-column: ALTER TABLE partitioned_copy DROP new_column; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-drop-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-ddl-rename-column: ALTER TABLE partitioned_copy RENAME data TO new_column; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-rename-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-table-size: SELECT citus_total_relation_size('partitioned_copy');
citus_total_relation_size
32768
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-master-modify-multiple-shards: DELETE FROM partitioned_copy;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
5
starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-master-drop-all-shards: SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); <waiting ...>
step s1-commit: COMMIT;
step s2-master-drop-all-shards: <... completed>
master_drop_all_shards
4
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
0
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
create_distributed_table
step s1-drop: DROP TABLE partitioned_copy;
step s1-create-non-distributed-table: CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-distribute-table: SELECT create_distributed_table('partitioned_copy', 'id'); <waiting ...>
step s1-commit: COMMIT;
step s2-distribute-table: <... completed>
create_distributed_table
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
15
starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-router-select: SELECT * FROM partitioned_copy WHERE id = 1;
id data int_data
1 b 1
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM partitioned_copy ORDER BY 1, 2;
id data int_data
0 a 0
1 b 1
2 c 2
3 d 3
4 e 4
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-task-tracker-select:
SET citus.task_executor_type TO "task-tracker";
SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
id data int_data id data int_data
0 a 0 0 a 0
1 b 1 1 b 1
2 c 2 2 c 2
3 d 3 3 d 3
4 e 4 4 e 4
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO partitioned_copy VALUES(0, 'k', 0);
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
11
starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO partitioned_copy SELECT * FROM partitioned_copy;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
15
starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE partitioned_copy SET data = 'l' WHERE id = 0;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM partitioned_copy WHERE id = 1;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
9
starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE partitioned_copy;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
5
starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE partitioned_copy;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
error in steps s1-commit s2-copy: ERROR: relation "partitioned_copy" does not exist
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
ERROR: relation "partitioned_copy" does not exist
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
error in steps s1-commit s2-copy: ERROR: missing data for column "new_column"
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
5
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s1-ddl-drop-column: ALTER TABLE partitioned_copy DROP new_column;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE partitioned_copy RENAME data TO new_column;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('partitioned_copy');
citus_total_relation_size
32768
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
10
starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-modify-multiple-shards: DELETE FROM partitioned_copy;
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
5
starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count
create_distributed_table
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-drop-all-shards: SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy');
master_drop_all_shards
4
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
0
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
create_distributed_table
step s1-drop: DROP TABLE partitioned_copy;
step s1-create-non-distributed-table: CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('partitioned_copy', 'id');
create_distributed_table
step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM partitioned_copy;
count
15
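For context: expected files like the one above are generated by the isolation tester from a .spec file. A minimal sketch of the spec structure behind a permutation such as s1-begin s1-copy s2-copy s1-commit (setup and step SQL abbreviated; this is not the actual Citus spec):

setup
{
    CREATE TABLE partitioned_copy(id integer, data text, int_data int);
}
teardown
{
    DROP TABLE IF EXISTS partitioned_copy;
}
session "s1"
step "s1-begin"  { BEGIN; }
step "s1-copy"   { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5' WITH CSV; }
step "s1-commit" { COMMIT; }
session "s2"
step "s2-copy"   { COPY partitioned_copy FROM PROGRAM 'echo 6, g, 6' WITH CSV; }
permutation "s1-begin" "s1-copy" "s2-copy" "s1-commit"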

File diff suppressed because it is too large


@ -1,14 +1,6 @@
--
-- MULTI_CREATE_TABLE_NEW_FEATURES
--
-- print major version to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+') AS major_version;
major_version
---------------
10
(1 row)
-- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10
-- is forbidden in distributed tables.
CREATE TABLE table_identity_col (


@ -1,10 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 11 AS server_verion_eleven_and_above
\gset
\if :server_verion_eleven_and_above
\else
\q
\endif
--
-- Regression tests for deparsing ALTER/DROP PROCEDURE Queries
--
@ -38,14 +31,6 @@ CREATE SCHEMA procedure_tests;
SET search_path TO procedure_tests;
SET citus.shard_count TO 4;
SET client_min_messages TO INFO;
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
(1 row)
CREATE FUNCTION deparse_test(text)
RETURNS text
AS 'citus'


@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 11 AS server_verion_eleven_and_above
\gset
\if :server_verion_eleven_and_above
\else
\q
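The six-line file removed above was nothing but a version guard. An annotated sketch of the psql pattern it used (comments added for illustration):

SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 11 AS server_verion_eleven_and_above
\gset
\if :server_verion_eleven_and_above
    -- version-specific statements run here
\else
    \q    -- quit: the rest of the file is skipped on older servers
\endif

With PostgreSQL 10 support removed, every supported server passes the check, so these guard files are deleted outright rather than updated.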


@ -2,14 +2,6 @@
-- MULTI_EXPLAIN
--
SET citus.next_shard_id TO 570000;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
version_above_nine
--------------------
t
(1 row)
\a\t
RESET citus.task_executor_type;
SET citus.explain_distributed_queries TO on;
@ -786,7 +778,6 @@ t
SELECT true AS valid FROM explain_json($$
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
t
-- Test multi shard update
EXPLAIN (COSTS FALSE)
UPDATE lineitem_hash_part
@ -810,7 +801,6 @@ Custom Scan (Citus Adaptive)
Node: host=localhost port=57638 dbname=regression
-> Update on lineitem_hash_part_360044 lineitem_hash_part
-> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
EXPLAIN (COSTS FALSE)
UPDATE lineitem_hash_part
SET l_suppkey = 12
@ -1034,8 +1024,8 @@ SELECT true AS valid FROM explain_xml($$
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey$$);
t
-- make sure that EXPLAIN works without
-- problems for queries that involve only
-- reference tables
SELECT true AS valid FROM explain_xml($$
SELECT count(*)


@ -3,13 +3,6 @@
--
-- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
-- tables.
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS server_version_above_ten;
server_version_above_ten
--------------------------
t
(1 row)
--
-- CREATE TEST TABLES
--


@ -1,312 +0,0 @@
--
-- MULTI_INDEX_STATEMENTS
--
-- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
-- tables.
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS server_version_above_ten;
server_version_above_ten
--------------------------
f
(1 row)
--
-- CREATE TEST TABLES
--
SET citus.next_shard_id TO 102080;
CREATE TABLE index_test_range(a int, b int, c int);
SELECT create_distributed_table('index_test_range', 'a', 'range');
create_distributed_table
--------------------------
(1 row)
SELECT master_create_empty_shard('index_test_range');
master_create_empty_shard
---------------------------
102080
(1 row)
SELECT master_create_empty_shard('index_test_range');
master_create_empty_shard
---------------------------
102081
(1 row)
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 2;
CREATE TABLE index_test_hash(a int, b int, c int);
SELECT create_distributed_table('index_test_hash', 'a', 'hash');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE index_test_append(a int, b int, c int);
SELECT create_distributed_table('index_test_append', 'a', 'append');
create_distributed_table
--------------------------
(1 row)
SELECT master_create_empty_shard('index_test_append');
master_create_empty_shard
---------------------------
102090
(1 row)
SELECT master_create_empty_shard('index_test_append');
master_create_empty_shard
---------------------------
102091
(1 row)
--
-- CREATE INDEX
--
-- Verify that we can create different types of indexes
CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey);
CREATE INDEX lineitem_partkey_desc_index ON lineitem (l_partkey DESC);
CREATE INDEX lineitem_partial_index ON lineitem (l_shipdate)
WHERE l_shipdate < '1995-01-01';
CREATE INDEX lineitem_colref_index ON lineitem (record_ne(lineitem.*, NULL));
SET client_min_messages = ERROR; -- avoid version-dependent warning about WAL
CREATE INDEX lineitem_orderkey_hash_index ON lineitem USING hash (l_partkey);
CREATE UNIQUE INDEX index_test_range_index_a ON index_test_range(a);
CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range(a,b);
CREATE UNIQUE INDEX index_test_hash_index_a ON index_test_hash(a);
CREATE UNIQUE INDEX index_test_hash_index_a_b ON index_test_hash(a,b);
CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON index_test_hash(a,b) WHERE c IS NOT NULL;
CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range(a,b) WHERE c IS NOT NULL;
CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON index_test_hash(a) INCLUDE (b,c);
ERROR: syntax error at or near "INCLUDE"
LINE 1: ...index_test_hash_index_a_b_c ON index_test_hash(a) INCLUDE (b...
^
RESET client_min_messages;
-- Verify that we handle if not exists statements correctly
CREATE INDEX lineitem_orderkey_index on lineitem(l_orderkey);
ERROR: relation "lineitem_orderkey_index" already exists
CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on lineitem(l_orderkey);
NOTICE: relation "lineitem_orderkey_index" already exists, skipping
CREATE INDEX IF NOT EXISTS lineitem_orderkey_index_new on lineitem(l_orderkey);
-- Verify if not exists behavior with an index with the same name on a different table
CREATE INDEX lineitem_orderkey_index on index_test_hash(a);
ERROR: relation "lineitem_orderkey_index" already exists
CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on index_test_hash(a);
NOTICE: relation "lineitem_orderkey_index" already exists, skipping
-- Verify that we can create indexes concurrently
CREATE INDEX CONCURRENTLY lineitem_concurrently_index ON lineitem (l_orderkey);
-- Verify that we warn out on CLUSTER command for distributed tables and on CLUSTER with no parameter
CLUSTER index_test_hash USING index_test_hash_index_a;
WARNING: not propagating CLUSTER command to worker nodes
CLUSTER;
WARNING: not propagating CLUSTER command to worker nodes
-- Verify that no-name local CREATE INDEX CONCURRENTLY works
CREATE TABLE local_table (id integer, name text);
CREATE INDEX CONCURRENTLY local_table_index ON local_table(id);
-- Verify we don't warn out on CLUSTER command for local tables
CLUSTER local_table USING local_table_index;
DROP TABLE local_table;
-- Verify that all indexes got created on the master node and one of the workers
SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname;
schemaname | tablename | indexname | tablespace | indexdef
------------+------------------+------------------------------------+------------+----------------------------------------------------------------------------------------------------------------------------
public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a)
public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b)
public | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON public.index_test_hash USING btree (a, b) WHERE (c IS NOT NULL)
public | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON public.index_test_range USING btree (a)
public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON public.index_test_range USING btree (a, b)
public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON public.index_test_range USING btree (a, b) WHERE (c IS NOT NULL)
public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON public.lineitem USING btree (record_ne(lineitem.*, NULL::record))
public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey)
public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON public.lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date)
public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC)
public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber)
public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate)
(15 rows)
\c - - - :worker_1_port
SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1);
count
-------
9
(1 row)
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
count
-------
24
(1 row)
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%';
count
-------
6
(1 row)
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
count
-------
0
(1 row)
\c - - - :master_port
-- Verify that we error out on unsupported statement types
CREATE UNIQUE INDEX try_index ON lineitem (l_orderkey);
ERROR: creating unique indexes on append-partitioned tables is currently unsupported
CREATE INDEX try_index ON lineitem (l_orderkey) TABLESPACE newtablespace;
ERROR: specifying tablespaces with CREATE INDEX statements is currently unsupported
CREATE UNIQUE INDEX try_unique_range_index ON index_test_range(b);
ERROR: creating unique indexes on non-partition columns is currently unsupported
CREATE UNIQUE INDEX try_unique_range_index_partial ON index_test_range(b) WHERE c IS NOT NULL;
ERROR: creating unique indexes on non-partition columns is currently unsupported
CREATE UNIQUE INDEX try_unique_hash_index ON index_test_hash(b);
ERROR: creating unique indexes on non-partition columns is currently unsupported
CREATE UNIQUE INDEX try_unique_hash_index_partial ON index_test_hash(b) WHERE c IS NOT NULL;
ERROR: creating unique indexes on non-partition columns is currently unsupported
CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(b);
ERROR: creating unique indexes on append-partitioned tables is currently unsupported
CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(a);
ERROR: creating unique indexes on append-partitioned tables is currently unsupported
CREATE UNIQUE INDEX try_unique_append_index_a_b ON index_test_append(a,b);
ERROR: creating unique indexes on append-partitioned tables is currently unsupported
-- Verify that we error out in case of postgres errors on supported statement
-- types.
CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey);
ERROR: relation "lineitem_orderkey_index" already exists
CREATE INDEX try_index ON lineitem USING gist (l_orderkey);
ERROR: data type bigint has no default operator class for access method "gist"
HINT: You must specify an operator class for the index or define a default operator class for the data type.
CREATE INDEX try_index ON lineitem (non_existent_column);
ERROR: column "non_existent_column" does not exist
CREATE INDEX ON lineitem (l_orderkey);
ERROR: creating index without a name on a distributed table is currently unsupported
-- Verify that none of the failed indexes got created on the master node
SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname;
schemaname | tablename | indexname | tablespace | indexdef
------------+------------------+------------------------------------+------------+----------------------------------------------------------------------------------------------------------------------------
public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON public.index_test_hash USING btree (a)
public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON public.index_test_hash USING btree (a, b)
public | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON public.index_test_hash USING btree (a, b) WHERE (c IS NOT NULL)
public | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON public.index_test_range USING btree (a)
public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON public.index_test_range USING btree (a, b)
public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON public.index_test_range USING btree (a, b) WHERE (c IS NOT NULL)
public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON public.lineitem USING btree (record_ne(lineitem.*, NULL::record))
public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey)
public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON public.lineitem USING btree (l_orderkey)
public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON public.lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date)
public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC)
public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber)
public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate)
(15 rows)
--
-- REINDEX
--
REINDEX INDEX lineitem_orderkey_index;
REINDEX TABLE lineitem;
REINDEX SCHEMA public;
REINDEX DATABASE regression;
REINDEX SYSTEM regression;
--
-- DROP INDEX
--
-- Verify that we can't drop multiple indexes in a single command
DROP INDEX lineitem_orderkey_index, lineitem_partial_index;
ERROR: cannot drop multiple distributed objects in a single command
HINT: Try dropping each object in a separate DROP command.
-- Verify that we can successfully drop indexes
DROP INDEX lineitem_orderkey_index;
DROP INDEX lineitem_orderkey_index_new;
DROP INDEX lineitem_partkey_desc_index;
DROP INDEX lineitem_partial_index;
DROP INDEX lineitem_colref_index;
-- Verify that we handle if exists statements correctly
DROP INDEX non_existent_index;
ERROR: index "non_existent_index" does not exist
DROP INDEX IF EXISTS non_existent_index;
NOTICE: index "non_existent_index" does not exist, skipping
DROP INDEX IF EXISTS lineitem_orderkey_hash_index;
DROP INDEX lineitem_orderkey_hash_index;
ERROR: index "lineitem_orderkey_hash_index" does not exist
DROP INDEX index_test_range_index_a;
DROP INDEX index_test_range_index_a_b;
DROP INDEX index_test_range_index_a_b_partial;
DROP INDEX index_test_hash_index_a;
DROP INDEX index_test_hash_index_a_b;
DROP INDEX index_test_hash_index_a_b_partial;
-- Verify that we can drop indexes concurrently
DROP INDEX CONCURRENTLY lineitem_concurrently_index;
-- Verify that all the indexes are dropped from the master and one worker node.
-- There's a primary key, so exclude those indexes from this check.
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%';
indrelid | indexrelid
----------+------------
(0 rows)
SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
schemaname | tablename | indexname | tablespace | indexdef
------------+-----------+-----------+------------+----------
(0 rows)
\c - - - :worker_1_port
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%';
indrelid | indexrelid
----------+------------
(0 rows)
SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
schemaname | tablename | indexname | tablespace | indexdef
------------+-----------+-----------+------------+----------
(0 rows)
-- create index that will conflict with master operations
CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b);
\c - - - :master_port
-- should fail because worker index already exists
CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);
ERROR: CONCURRENTLY-enabled index command failed
DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
-- the failure results in an INVALID index
SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
Index Valid?
--------------
f
(1 row)
-- we can clean it up and recreate with a DROP IF EXISTS
DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx;
CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);
SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
Index Valid?
--------------
t
(1 row)
\c - - - :worker_1_port
-- now drop shard index to test partial master DROP failure
DROP INDEX CONCURRENTLY ith_b_idx_102089;
\c - - - :master_port
DROP INDEX CONCURRENTLY ith_b_idx;
ERROR: CONCURRENTLY-enabled index command failed
DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
-- the failure results in an INVALID index
SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;
Index Valid?
--------------
f
(1 row)
-- final clean up
DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx;
-- Drop created tables
DROP TABLE index_test_range;
DROP TABLE index_test_hash;
DROP TABLE index_test_append;
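This alternative expected file existed only because PostgreSQL 10 rejects the INCLUDE clause, as the syntax error above shows. Covering indexes arrived in PostgreSQL 11, so on every still-supported version the statement parses; for example:

-- Covering index (PostgreSQL 11+): b and c are stored in the index as
-- non-key columns and take no part in the uniqueness check on a.
CREATE UNIQUE INDEX index_test_hash_index_a_b_c
    ON index_test_hash (a) INCLUDE (b, c);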


@ -1,6 +1,5 @@
-- if the output of following query changes, we might need to change
-- some heap_getattr() calls to heap_deform_tuple(). This errors out in
-- postgres versions before 11.
-- some heap_getattr() calls to heap_deform_tuple().
SELECT attrelid::regclass, attname, atthasmissing, attmissingval
FROM pg_attribute
WHERE atthasmissing


@ -1,10 +0,0 @@
-- if the output of following query changes, we might need to change
-- some heap_getattr() calls to heap_deform_tuple(). This errors out in
-- postgres versions before 11.
SELECT attrelid::regclass, attname, atthasmissing, attmissingval
FROM pg_attribute
WHERE atthasmissing
ORDER BY attrelid, attname;
ERROR: column "atthasmissing" does not exist
LINE 1: SELECT attrelid::regclass, attname, atthasmissing, attmissin...
^
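The deleted _0 file above merely recorded that pg_attribute.atthasmissing does not exist on PostgreSQL 10. The column, together with attmissingval, was added in PostgreSQL 11 to support fast ALTER TABLE .. ADD COLUMN with a default; a sketch of what sets it (hypothetical table, for illustration only):

CREATE TABLE fast_default_demo (id int);
-- On PostgreSQL 11+ this does not rewrite the table; the default is
-- remembered in attmissingval and atthasmissing is set to true.
ALTER TABLE fast_default_demo ADD COLUMN val int DEFAULT 5;
SELECT attname, atthasmissing, attmissingval
FROM pg_attribute
WHERE attrelid = 'fast_default_demo'::regclass AND atthasmissing;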


@ -3,14 +3,6 @@
--
-- Test user permissions.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
(1 row)
SET citus.next_shard_id TO 1420000;
SET citus.shard_replication_factor TO 1;
ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;


@ -1,693 +0,0 @@
--
-- MULTI_MULTIUSERS
--
-- Test user permissions.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
f
(1 row)
SET citus.next_shard_id TO 1420000;
SET citus.shard_replication_factor TO 1;
CREATE TABLE test (id integer, val integer);
SELECT create_distributed_table('test', 'id');
create_distributed_table
--------------------------
(1 row)
CREATE TABLE test_coloc (id integer, val integer);
SELECT create_distributed_table('test_coloc', 'id', colocate_with := 'none');
create_distributed_table
--------------------------
(1 row)
SET citus.shard_count TO 1;
CREATE TABLE singleshard (id integer, val integer);
SELECT create_distributed_table('singleshard', 'id');
create_distributed_table
--------------------------
(1 row)
-- turn off propagation to avoid Enterprise processing the following section
SET citus.enable_ddl_propagation TO off;
CREATE USER full_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER usage_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER read_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER no_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE ROLE some_role;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test TO full_access;
GRANT SELECT ON TABLE test TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT ALL ON SCHEMA full_access_user_schema TO full_access;
GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access;
SET citus.enable_ddl_propagation TO DEFAULT;
\c - - - :worker_1_port
CREATE USER full_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER usage_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER read_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER no_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE ROLE some_role;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test_1420000 TO full_access;
GRANT SELECT ON TABLE test_1420000 TO read_access;
GRANT ALL ON TABLE test_1420002 TO full_access;
GRANT SELECT ON TABLE test_1420002 TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
GRANT ALL ON SCHEMA full_access_user_schema TO full_access;
GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access;
\c - - - :worker_2_port
CREATE USER full_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER usage_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER read_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER no_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE ROLE some_role;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT some_role TO full_access;
GRANT some_role TO read_access;
GRANT ALL ON TABLE test_1420001 TO full_access;
GRANT SELECT ON TABLE test_1420001 TO read_access;
GRANT ALL ON TABLE test_1420003 TO full_access;
GRANT SELECT ON TABLE test_1420003 TO read_access;
CREATE SCHEMA full_access_user_schema;
REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC;
GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
GRANT ALL ON SCHEMA full_access_user_schema TO full_access;
GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access;
\c - - - :master_port
-- create prepare tests
PREPARE prepare_insert AS INSERT INTO test VALUES ($1);
PREPARE prepare_select AS SELECT count(*) FROM test;
-- not allowed to read absolute paths, even as superuser
COPY "/etc/passwd" TO STDOUT WITH (format transmit);
ERROR: absolute path not allowed
-- not allowed to read paths outside pgsql_job_cache, even as superuser
COPY "postgresql.conf" TO STDOUT WITH (format transmit);
ERROR: path must be in the pgsql_job_cache directory
-- check full permission
SET ROLE full_access;
EXECUTE prepare_insert(1);
EXECUTE prepare_select;
count
-------
1
(1 row)
INSERT INTO test VALUES (2);
SELECT count(*) FROM test;
count
-------
2
(1 row)
SELECT count(*) FROM test WHERE id = 1;
count
-------
1
(1 row)
SET citus.task_executor_type TO 'task-tracker';
SELECT count(*), min(current_user) FROM test;
count | min
-------+-------------
2 | full_access
(1 row)
-- test re-partition query (needs to transmit intermediate results)
SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2;
count
-------
0
(1 row)
-- should not be able to transmit directly
COPY "postgresql.conf" TO STDOUT WITH (format transmit);
ERROR: operation is not allowed
HINT: Run the command with a superuser.
RESET citus.task_executor_type;
-- should not be able to transmit directly
COPY "postgresql.conf" TO STDOUT WITH (format transmit);
ERROR: operation is not allowed
HINT: Run the command with a superuser.
-- create a task that other users should not be able to inspect
SELECT task_tracker_assign_task(1, 1, 'SELECT 1');
task_tracker_assign_task
--------------------------
(1 row)
-- check read permission
SET ROLE read_access;
EXECUTE prepare_insert(1);
ERROR: permission denied for relation test
EXECUTE prepare_select;
count
-------
2
(1 row)
INSERT INTO test VALUES (2);
ERROR: permission denied for relation test
SELECT count(*) FROM test;
count
-------
2
(1 row)
SELECT count(*) FROM test WHERE id = 1;
count
-------
1
(1 row)
SET citus.task_executor_type TO 'task-tracker';
SELECT count(*), min(current_user) FROM test;
count | min
-------+-------------
2 | read_access
(1 row)
-- test re-partition query (needs to transmit intermediate results)
SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2;
count
-------
0
(1 row)
-- should not be able to transmit directly
COPY "postgresql.conf" TO STDOUT WITH (format transmit);
ERROR: operation is not allowed
HINT: Run the command with a superuser.
-- should not be able to access tasks or jobs belonging to a different user
SELECT task_tracker_task_status(1, 1);
ERROR: could not find the worker task
DETAIL: Task jobId: 1 and taskId: 1
SELECT task_tracker_assign_task(1, 2, 'SELECT 1');
ERROR: must be owner of schema pg_merge_job_0001
SELECT task_tracker_cleanup_job(1);
ERROR: must be owner of schema pg_merge_job_0001
-- should not be allowed to take aggressive locks on the table
BEGIN;
SELECT lock_relation_if_exists('test', 'ACCESS SHARE');
lock_relation_if_exists
-------------------------
t
(1 row)
SELECT lock_relation_if_exists('test', 'EXCLUSIVE');
ERROR: permission denied for relation test
ABORT;
RESET citus.task_executor_type;
-- check no permission
SET ROLE no_access;
EXECUTE prepare_insert(1);
ERROR: permission denied for relation test
EXECUTE prepare_select;
ERROR: permission denied for relation test
INSERT INTO test VALUES (2);
ERROR: permission denied for relation test
SELECT count(*) FROM test;
ERROR: permission denied for relation test
SELECT count(*) FROM test WHERE id = 1;
ERROR: permission denied for relation test
SET citus.task_executor_type TO 'task-tracker';
SELECT count(*), min(current_user) FROM test;
ERROR: permission denied for relation test
-- test re-partition query
SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2;
ERROR: permission denied for relation test
-- should not be able to transmit directly
COPY "postgresql.conf" TO STDOUT WITH (format transmit);
ERROR: operation is not allowed
HINT: Run the command with a superuser.
RESET citus.task_executor_type;
-- should be able to use intermediate results as any user
BEGIN;
SELECT create_intermediate_result('topten', 'SELECT s FROM generate_series(1,10) s');
create_intermediate_result
----------------------------
10
(1 row)
SELECT * FROM read_intermediate_result('topten', 'binary'::citus_copy_format) AS res (s int) ORDER BY s;
s
----
1
2
3
4
5
6
7
8
9
10
(10 rows)
END;
-- as long as we don't read from a table
BEGIN;
SELECT create_intermediate_result('topten', 'SELECT count(*) FROM test');
ERROR: permission denied for relation test
ABORT;
SELECT * FROM citus_stat_statements_reset();
ERROR: permission denied for function citus_stat_statements_reset
-- should not be allowed to upgrade to reference table
SELECT upgrade_to_reference_table('singleshard');
ERROR: must be owner of relation singleshard
-- should not be allowed to co-locate tables
SELECT mark_tables_colocated('test', ARRAY['test_coloc'::regclass]);
ERROR: must be owner of relation test
-- should not be allowed to take any locks
BEGIN;
SELECT lock_relation_if_exists('test', 'ACCESS SHARE');
ERROR: permission denied for relation test
ABORT;
BEGIN;
SELECT lock_relation_if_exists('test', 'EXCLUSIVE');
ERROR: permission denied for relation test
ABORT;
-- table owner should be the same on the shards, even when distributing the table as superuser
SET ROLE full_access;
CREATE TABLE my_table (id integer, val integer);
RESET ROLE;
SELECT create_distributed_table('my_table', 'id');
create_distributed_table
--------------------------
(1 row)
SELECT result FROM run_command_on_workers($$SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_table_%' LIMIT 1$$);
result
-------------
full_access
full_access
(2 rows)
SELECT task_tracker_cleanup_job(1);
task_tracker_cleanup_job
--------------------------
(1 row)
-- table should be distributable by the super user when it has data in it
SET ROLE full_access;
CREATE TABLE my_table_with_data (id integer, val integer);
INSERT INTO my_table_with_data VALUES (1,2);
RESET ROLE;
SELECT create_distributed_table('my_table_with_data', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
SELECT count(*) FROM my_table_with_data;
count
-------
1
(1 row)
-- a table owned by a role should be distributable by a user that has been granted that role,
-- but not by a user without that role
SET ROLE full_access;
CREATE TABLE my_role_table_with_data (id integer, val integer);
ALTER TABLE my_role_table_with_data OWNER TO some_role;
INSERT INTO my_role_table_with_data VALUES (1,2);
RESET ROLE;
-- we first try to distribute it with a user that does not have the role so we can reuse the table
SET ROLE no_access;
SELECT create_distributed_table('my_role_table_with_data', 'id');
ERROR: must be owner of relation my_role_table_with_data
RESET ROLE;
-- then we try to distribute it with a user that has the role but is different from the one that created it
SET ROLE read_access;
SELECT create_distributed_table('my_role_table_with_data', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
RESET ROLE;
-- lastly we want to verify the table owner is set to the role, not the user that distributed it
SELECT result FROM run_command_on_workers($cmd$
SELECT tableowner FROM pg_tables WHERE tablename LIKE 'my_role_table_with_data%' LIMIT 1;
$cmd$);
result
-----------
some_role
some_role
(2 rows)
-- we want to verify a user without CREATE access cannot distribute their table, but can get
-- it distributed by the super user
-- we want to make sure the schema and user are set up in such a way that they can't create a
-- table
SET ROLE usage_access;
CREATE TABLE full_access_user_schema.t1 (id int);
ERROR: permission denied for schema full_access_user_schema
LINE 1: CREATE TABLE full_access_user_schema.t1 (id int);
^
RESET ROLE;
-- now we create the table for the user
CREATE TABLE full_access_user_schema.t1 (id int);
ALTER TABLE full_access_user_schema.t1 OWNER TO usage_access;
-- make sure we can insert data
SET ROLE usage_access;
INSERT INTO full_access_user_schema.t1 VALUES (1),(2),(3);
-- creating the table should fail on the worker machine since the user is
-- not allowed to create a table there
SELECT create_distributed_table('full_access_user_schema.t1', 'id');
ERROR: permission denied for schema full_access_user_schema
CONTEXT: while executing command on localhost:57638
RESET ROLE;
-- now we distribute the table as super user
SELECT create_distributed_table('full_access_user_schema.t1', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
-- verify the owner of the shards for the distributed tables
SELECT result FROM run_command_on_workers($cmd$
SELECT tableowner FROM pg_tables WHERE
true
AND schemaname = 'full_access_user_schema'
AND tablename LIKE 't1_%'
LIMIT 1;
$cmd$);
result
--------------
usage_access
usage_access
(2 rows)
-- a user with all privileges on a schema should be able to distribute tables
SET ROLE full_access;
CREATE TABLE full_access_user_schema.t2(id int);
SELECT create_distributed_table('full_access_user_schema.t2', 'id');
create_distributed_table
--------------------------
(1 row)
RESET ROLE;
-- a user with all privileges on a schema should be able to upgrade a distributed table to
-- a reference table
SET ROLE full_access;
BEGIN;
CREATE TABLE full_access_user_schema.r1(id int);
SET LOCAL citus.shard_count TO 1;
SELECT create_distributed_table('full_access_user_schema.r1', 'id');
create_distributed_table
--------------------------
(1 row)
SELECT upgrade_to_reference_table('full_access_user_schema.r1');
upgrade_to_reference_table
----------------------------
(1 row)
COMMIT;
RESET ROLE;
-- the super user should be able to upgrade a distributed table to a reference table, even
-- if it is owned by another user
SET ROLE full_access;
BEGIN;
CREATE TABLE full_access_user_schema.r2(id int);
SET LOCAL citus.shard_count TO 1;
SELECT create_distributed_table('full_access_user_schema.r2', 'id');
create_distributed_table
--------------------------
(1 row)
COMMIT;
RESET ROLE;
-- the usage_access should not be able to upgrade the table
SET ROLE usage_access;
SELECT upgrade_to_reference_table('full_access_user_schema.r2');
ERROR: must be owner of relation r2
RESET ROLE;
-- the super user should be able to upgrade it
SELECT upgrade_to_reference_table('full_access_user_schema.r2');
upgrade_to_reference_table
----------------------------
(1 row)
-- verify the owner of the shards for the reference table
SELECT result FROM run_command_on_workers($cmd$
SELECT tableowner FROM pg_tables WHERE
true
AND schemaname = 'full_access_user_schema'
AND tablename LIKE 'r2_%'
LIMIT 1;
$cmd$);
result
-------------
full_access
full_access
(2 rows)
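-- a hedged recap of the preconditions above, with a hypothetical table name:
-- upgrade_to_reference_table only accepts single-shard distributed tables, so the
-- tests pin citus.shard_count to 1 before distributing, and the call itself is
-- subject to the usual owner-or-superuser check
BEGIN;
SET LOCAL citus.shard_count TO 1;
CREATE TABLE to_be_reference (id int);
SELECT create_distributed_table('to_be_reference', 'id');
COMMIT;
SELECT upgrade_to_reference_table('to_be_reference');  -- owner or superuser only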
-- the super user should be the only one able to call worker_cleanup_job_schema_cache
SELECT worker_cleanup_job_schema_cache();
worker_cleanup_job_schema_cache
---------------------------------
(1 row)
SET ROLE full_access;
SELECT worker_cleanup_job_schema_cache();
ERROR: permission denied for function worker_cleanup_job_schema_cache
SET ROLE usage_access;
SELECT worker_cleanup_job_schema_cache();
ERROR: permission denied for function worker_cleanup_job_schema_cache
SET ROLE read_access;
SELECT worker_cleanup_job_schema_cache();
ERROR: permission denied for function worker_cleanup_job_schema_cache
SET ROLE no_access;
SELECT worker_cleanup_job_schema_cache();
ERROR: permission denied for function worker_cleanup_job_schema_cache
RESET ROLE;
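-- a hedged sketch of the usual mechanism behind such errors (an assumption, not
-- necessarily how Citus implements it): EXECUTE is revoked from PUBLIC, leaving
-- superusers and explicit grantees as the only callers
REVOKE ALL ON FUNCTION worker_cleanup_job_schema_cache() FROM PUBLIC;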
-- to test access to files created during repartition we will create some on worker 1
\c - - - :worker_1_port
SET ROLE full_access;
SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS a', 'a', 23, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]);
worker_hash_partition_table
-----------------------------
(1 row)
RESET ROLE;
-- all transfer attempts are initiated from the other worker
\c - - - :worker_2_port
-- super user should not be able to copy files created by a user
SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port);
WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.10": No such file or directory
CONTEXT: while executing command on localhost:57637
ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:57637
-- different user should not be able to fetch partition file
SET ROLE usage_access;
SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port);
WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.18007": No such file or directory
CONTEXT: while executing command on localhost:57637
ERROR: could not receive file "base/pgsql_job_cache/job_0042/task_000001/p_00001" from localhost:57637
-- only the user who created the files should be able to fetch them
SET ROLE full_access;
SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port);
worker_fetch_partition_file
-----------------------------
(1 row)
RESET ROLE;
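-- a hedged sketch of the fetch flow above, with hypothetical job and task ids:
-- the partition files carry the creating user's id in their suffix, so a fetch
-- from another node succeeds only when run as that same user
SET ROLE file_owner;  -- hypothetical role that created the files on worker 1
SELECT worker_fetch_partition_file(99, 1, 1, 1, 'localhost', 57637);
RESET ROLE;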
-- now we will test that only the user who owns the fetched file is able to merge it into
-- a table
-- test that no other user can merge the downloaded file before the task is tracked
SET ROLE usage_access;
SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']);
ERROR: job schema does not exist
DETAIL: must be superuser to use public schema
RESET ROLE;
SET ROLE full_access;
-- use the side effect of this function to have a schema to use, otherwise only the super
-- user could call worker_merge_files_into_table and store the results in public, which is
-- not what we want
SELECT task_tracker_assign_task(42, 1, 'SELECT 1');
task_tracker_assign_task
--------------------------
(1 row)
RESET ROLE;
-- test that no other user can merge the downloaded file after the task is tracked
SET ROLE usage_access;
SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']);
ERROR: must be owner of schema pg_merge_job_0042
RESET ROLE;
-- test that the super user is unable to read the contents of the intermediate file,
-- although it does create the table
SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']);
WARNING: Task file "task_000001.18003" does not have expected suffix ".10"
worker_merge_files_into_table
-------------------------------
(1 row)
SELECT count(*) FROM pg_merge_job_0042.task_000001;
count
-------
0
(1 row)
DROP TABLE pg_merge_job_0042.task_000001; -- drop table so we can reuse the same files for more tests
SET ROLE full_access;
SELECT worker_merge_files_into_table(42, 1, ARRAY['a'], ARRAY['integer']);
worker_merge_files_into_table
-------------------------------
(1 row)
SELECT count(*) FROM pg_merge_job_0042.task_000001;
count
-------
25
(1 row)
DROP TABLE pg_merge_job_0042.task_000001; -- drop table so we can reuse the same files for more tests
RESET ROLE;
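-- a hedged sketch of the merge flow, with a hypothetical job id: registering a
-- task first creates the job schema pg_merge_job_<id> owned by the caller, and
-- the merge then requires ownership of that schema
SET ROLE file_owner;                                 -- hypothetical owner of the files
SELECT task_tracker_assign_task(99, 1, 'SELECT 1');  -- creates pg_merge_job_0099
SELECT worker_merge_files_into_table(99, 1, ARRAY['a'], ARRAY['integer']);
RESET ROLE;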
-- test that no other user can merge files and run query on the already fetched files
SET ROLE usage_access;
SELECT worker_merge_files_and_run_query(42, 1,
'CREATE TABLE task_000001_merge(merge_column_0 int)',
'CREATE TABLE task_000001 (a) AS SELECT sum(merge_column_0) FROM task_000001_merge'
);
ERROR: must be owner of schema pg_merge_job_0042
RESET ROLE;
-- test that the super user is unable to read the contents of the partitioned files after
-- trying to merge with run query
SELECT worker_merge_files_and_run_query(42, 1,
'CREATE TABLE task_000001_merge(merge_column_0 int)',
'CREATE TABLE task_000001 (a) AS SELECT sum(merge_column_0) FROM task_000001_merge'
);
WARNING: Task file "task_000001.18003" does not have expected suffix ".10"
worker_merge_files_and_run_query
----------------------------------
(1 row)
SELECT count(*) FROM pg_merge_job_0042.task_000001_merge;
count
-------
0
(1 row)
SELECT count(*) FROM pg_merge_job_0042.task_000001;
count
-------
1
(1 row)
DROP TABLE pg_merge_job_0042.task_000001, pg_merge_job_0042.task_000001_merge; -- drop table so we can reuse the same files for more tests
-- test that the owner of the task can merge files and run query correctly
SET ROLE full_access;
SELECT worker_merge_files_and_run_query(42, 1,
'CREATE TABLE task_000001_merge(merge_column_0 int)',
'CREATE TABLE task_000001 (a) AS SELECT sum(merge_column_0) FROM task_000001_merge'
);
worker_merge_files_and_run_query
----------------------------------
(1 row)
-- test that owner of task cannot execute arbitrary sql
SELECT worker_merge_files_and_run_query(42, 1,
'CREATE TABLE task_000002_merge(merge_column_0 int)',
'DROP USER usage_access'
);
ERROR: permission denied to drop role
CONTEXT: SQL statement "DROP USER usage_access"
SELECT worker_merge_files_and_run_query(42, 1,
'DROP USER usage_access',
'CREATE TABLE task_000002 (a) AS SELECT sum(merge_column_0) FROM task_000002_merge'
);
ERROR: permission denied to drop role
CONTEXT: SQL statement "DROP USER usage_access"
SELECT count(*) FROM pg_merge_job_0042.task_000001_merge;
count
-------
25
(1 row)
SELECT count(*) FROM pg_merge_job_0042.task_000001;
count
-------
1
(1 row)
DROP TABLE pg_merge_job_0042.task_000001, pg_merge_job_0042.task_000001_merge; -- drop table so we can reuse the same files for more tests
RESET ROLE;
\c - - - :master_port
DROP SCHEMA full_access_user_schema CASCADE;
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table full_access_user_schema.t1
drop cascades to table full_access_user_schema.t2
drop cascades to table full_access_user_schema.r1
drop cascades to table full_access_user_schema.r2
DROP TABLE
my_table,
my_table_with_data,
my_role_table_with_data,
singleshard,
test,
test_coloc;
DROP USER full_access;
DROP USER read_access;
DROP USER no_access;
DROP ROLE some_role;

View File

@ -1,372 +0,0 @@
-- Test passing off CALL to mx workers
create schema multi_mx_call;
set search_path to multi_mx_call, public;
-- Create worker-local tables to test procedure calls were routed
set citus.shard_replication_factor to 2;
set citus.replication_model to 'statement';
-- This table requires specific settings, create before getting into things
create table mx_call_dist_table_replica(id int, val int);
select create_distributed_table('mx_call_dist_table_replica', 'id');
create_distributed_table
--------------------------
(1 row)
insert into mx_call_dist_table_replica values (9,1),(8,2),(7,3),(6,4),(5,5);
set citus.shard_replication_factor to 1;
set citus.replication_model to 'streaming';
--
-- Create tables and procedures we want to use in tests
--
create table mx_call_dist_table_1(id int, val int);
select create_distributed_table('mx_call_dist_table_1', 'id');
create_distributed_table
--------------------------
(1 row)
insert into mx_call_dist_table_1 values (3,1),(4,5),(9,2),(6,5),(3,5);
create table mx_call_dist_table_2(id int, val int);
select create_distributed_table('mx_call_dist_table_2', 'id');
create_distributed_table
--------------------------
(1 row)
insert into mx_call_dist_table_2 values (1,1),(1,2),(2,2),(3,3),(3,4);
create table mx_call_dist_table_bigint(id bigint, val bigint);
select create_distributed_table('mx_call_dist_table_bigint', 'id');
create_distributed_table
--------------------------
(1 row)
insert into mx_call_dist_table_bigint values (1,1),(1,2),(2,2),(3,3),(3,4);
create table mx_call_dist_table_ref(id int, val int);
select create_reference_table('mx_call_dist_table_ref');
create_reference_table
------------------------
(1 row)
insert into mx_call_dist_table_ref values (2,7),(1,8),(2,8),(1,8),(2,8);
create type mx_call_enum as enum ('A', 'S', 'D', 'F');
create table mx_call_dist_table_enum(id int, key mx_call_enum);
select create_distributed_table('mx_call_dist_table_enum', 'key');
create_distributed_table
--------------------------
(1 row)
insert into mx_call_dist_table_enum values (1,'S'),(2,'A'),(3,'D'),(4,'F');
CREATE PROCEDURE mx_call_proc(x int, INOUT y int)
LANGUAGE plpgsql AS $$
BEGIN
-- groupid is 0 on the coordinator and non-zero on workers, so by using it here
-- we make sure the procedure is being executed on a worker.
y := x + (select case groupid when 0 then 1 else 0 end from pg_dist_local_group);
-- we also make sure that we can run distributed queries in the procedures
-- that are routed to the workers.
y := y + (select sum(t1.val + t2.val) from multi_mx_call.mx_call_dist_table_1 t1 join multi_mx_call.mx_call_dist_table_2 t2 on t1.id = t2.id);
END;$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE mx_call_proc(x int, INOUT y int)
^
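-- for readers following the routing check inside the procedure body above:
-- pg_dist_local_group holds a single row whose groupid is 0 on the coordinator
-- and non-zero on workers, so this query tells the code where it is running
SELECT groupid FROM pg_dist_local_group;  -- 0 on the coordinator, non-zero on a worker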
CREATE PROCEDURE mx_call_proc_bigint(x bigint, INOUT y bigint)
LANGUAGE plpgsql AS $$
BEGIN
y := x + y * 2;
END;$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE mx_call_proc_bigint(x bigint, INOUT y bigin...
^
-- create another procedure which verifies:
-- 1. we work fine with multiple return columns
-- 2. we work fine in combination with custom types
CREATE PROCEDURE mx_call_proc_custom_types(INOUT x mx_call_enum, INOUT y mx_call_enum)
LANGUAGE plpgsql AS $$
BEGIN
y := x;
x := (select case groupid when 0 then 'F' else 'S' end from pg_dist_local_group);
END;$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE mx_call_proc_custom_types(INOUT x mx_call_e...
^
-- Test that undistributed procedures have no issue executing
call multi_mx_call.mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, 0);
^
call multi_mx_call.mx_call_proc_custom_types('S', 'A');
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc_custom_types('S', 'A');
^
-- Same for unqualified names
call mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call mx_call_proc(2, 0);
^
call mx_call_proc_custom_types('S', 'A');
ERROR: syntax error at or near "call"
LINE 1: call mx_call_proc_custom_types('S', 'A');
^
-- Mark both procedures as distributed ...
select create_distributed_function('mx_call_proc(int,int)');
ERROR: function "mx_call_proc(int,int)" does not exist
LINE 1: select create_distributed_function('mx_call_proc(int,int)');
^
select create_distributed_function('mx_call_proc_bigint(bigint,bigint)');
ERROR: function "mx_call_proc_bigint(bigint,bigint)" does not exist
LINE 1: select create_distributed_function('mx_call_proc_bigint(bigi...
^
select create_distributed_function('mx_call_proc_custom_types(mx_call_enum,mx_call_enum)');
ERROR: function "mx_call_proc_custom_types(mx_call_enum,mx_call_enum)" does not exist
LINE 1: select create_distributed_function('mx_call_proc_custom_type...
^
-- We still don't route them to the workers, because they aren't
-- colocated with any distributed tables.
SET client_min_messages TO DEBUG1;
call multi_mx_call.mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, 0);
^
call mx_call_proc_bigint(4, 2);
ERROR: syntax error at or near "call"
LINE 1: call mx_call_proc_bigint(4, 2);
^
call multi_mx_call.mx_call_proc_custom_types('S', 'A');
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc_custom_types('S', 'A');
^
-- Mark them as colocated with a table. Now we should route them to workers.
select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 1);
colocate_proc_with_table
--------------------------
(1 row)
select colocate_proc_with_table('mx_call_proc_bigint', 'mx_call_dist_table_bigint'::regclass, 1);
colocate_proc_with_table
--------------------------
(1 row)
select colocate_proc_with_table('mx_call_proc_custom_types', 'mx_call_dist_table_enum'::regclass, 1);
colocate_proc_with_table
--------------------------
(1 row)
call multi_mx_call.mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, 0);
^
call multi_mx_call.mx_call_proc_custom_types('S', 'A');
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc_custom_types('S', 'A');
^
call mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call mx_call_proc(2, 0);
^
call mx_call_proc_custom_types('S', 'A');
ERROR: syntax error at or near "call"
LINE 1: call mx_call_proc_custom_types('S', 'A');
^
-- Test implicit cast of int to bigint
call mx_call_proc_bigint(4, 2);
ERROR: syntax error at or near "call"
LINE 1: call mx_call_proc_bigint(4, 2);
^
-- We don't allow distributing calls inside transactions
begin;
call multi_mx_call.mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, 0);
^
commit;
-- Drop the table colocated with mx_call_proc_custom_types. Now it shouldn't
-- be routed to workers anymore.
SET client_min_messages TO NOTICE;
drop table mx_call_dist_table_enum;
SET client_min_messages TO DEBUG1;
call multi_mx_call.mx_call_proc_custom_types('S', 'A');
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc_custom_types('S', 'A');
^
-- Make sure we do bounds checking on distributed argument index
-- This also tests that we have cache invalidation for pg_dist_object updates
select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, -1);
colocate_proc_with_table
--------------------------
(1 row)
call multi_mx_call.mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, 0);
^
select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 2);
colocate_proc_with_table
--------------------------
(1 row)
call multi_mx_call.mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, 0);
^
-- We don't currently support colocating with reference tables
select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_ref'::regclass, 1);
colocate_proc_with_table
--------------------------
(1 row)
call multi_mx_call.mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, 0);
^
-- We don't currently support colocating with replicated tables
select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_replica'::regclass, 1);
colocate_proc_with_table
--------------------------
(1 row)
call multi_mx_call.mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, 0);
^
SET client_min_messages TO NOTICE;
drop table mx_call_dist_table_replica;
SET client_min_messages TO DEBUG1;
select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass, 1);
colocate_proc_with_table
--------------------------
(1 row)
-- Test that we handle transactional constructs correctly inside a procedure
-- that is routed to the workers.
CREATE PROCEDURE mx_call_proc_tx(x int) LANGUAGE plpgsql AS $$
BEGIN
INSERT INTO multi_mx_call.mx_call_dist_table_1 VALUES (x, -1), (x+1, 4);
COMMIT;
UPDATE multi_mx_call.mx_call_dist_table_1 SET val = val+1 WHERE id >= x;
ROLLBACK;
-- Now do the final update!
UPDATE multi_mx_call.mx_call_dist_table_1 SET val = val-1 WHERE id >= x;
END;$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE mx_call_proc_tx(x int) LANGUAGE plpgsql AS ...
^
-- before distribution ...
CALL multi_mx_call.mx_call_proc_tx(10);
ERROR: syntax error at or near "CALL"
LINE 1: CALL multi_mx_call.mx_call_proc_tx(10);
^
-- after distribution ...
select create_distributed_function('mx_call_proc_tx(int)', '$1', 'mx_call_dist_table_1');
ERROR: function "mx_call_proc_tx(int)" does not exist
LINE 1: select create_distributed_function('mx_call_proc_tx(int)', '...
^
CALL multi_mx_call.mx_call_proc_tx(20);
ERROR: syntax error at or near "CALL"
LINE 1: CALL multi_mx_call.mx_call_proc_tx(20);
^
SELECT id, val FROM mx_call_dist_table_1 ORDER BY id, val;
id | val
----+-----
3 | 1
3 | 5
4 | 5
6 | 5
9 | 2
(5 rows)
-- Test that we properly propagate errors raised from procedures.
CREATE PROCEDURE mx_call_proc_raise(x int) LANGUAGE plpgsql AS $$
BEGIN
RAISE WARNING 'warning';
RAISE EXCEPTION 'error';
END;$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE mx_call_proc_raise(x int) LANGUAGE plpgsql ...
^
select create_distributed_function('mx_call_proc_raise(int)', '$1', 'mx_call_dist_table_1');
ERROR: function "mx_call_proc_raise(int)" does not exist
LINE 1: select create_distributed_function('mx_call_proc_raise(int)'...
^
\set VERBOSITY terse
call multi_mx_call.mx_call_proc_raise(2);
ERROR: syntax error at or near "call" at character 1
\set VERBOSITY default
-- Test that we don't propagate to non-metadata worker nodes
select stop_metadata_sync_to_node('localhost', :worker_1_port);
stop_metadata_sync_to_node
----------------------------
(1 row)
select stop_metadata_sync_to_node('localhost', :worker_2_port);
stop_metadata_sync_to_node
----------------------------
(1 row)
call multi_mx_call.mx_call_proc(2, 0);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, 0);
^
SET client_min_messages TO NOTICE;
select start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
-----------------------------
(1 row)
select start_metadata_sync_to_node('localhost', :worker_2_port);
start_metadata_sync_to_node
-----------------------------
(1 row)
-- stop_metadata_sync_to_node()/start_metadata_sync_to_node() might make
-- worker backend caches inconsistent. Reconnect to coordinator to use
-- new worker connections, hence new backends.
\c - - - :master_port
SET search_path to multi_mx_call, public;
SET client_min_messages TO DEBUG1;
--
-- Test non-const parameter values
--
CREATE FUNCTION mx_call_add(int, int) RETURNS int
AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE;
SELECT create_distributed_function('mx_call_add(int,int)');
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
create_distributed_function
-----------------------------
(1 row)
-- non-const distribution parameters cannot be pushed down
call multi_mx_call.mx_call_proc(2, mx_call_add(3, 4));
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(2, mx_call_add(3, 4));
^
-- non-const parameter can be pushed down
call multi_mx_call.mx_call_proc(multi_mx_call.mx_call_add(3, 4), 2);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(multi_mx_call.mx_call_add(3,...
^
-- volatile parameter cannot be pushed down
call multi_mx_call.mx_call_proc(floor(random())::int, 2);
ERROR: syntax error at or near "call"
LINE 1: call multi_mx_call.mx_call_proc(floor(random())::int, 2);
^
reset client_min_messages;
\set VERBOSITY terse
drop schema multi_mx_call cascade;
NOTICE: drop cascades to 6 other objects

View File

@ -1,869 +0,0 @@
--
-- MULTI_MX_EXPLAIN
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
\c - - - :worker_1_port
\c - - - :worker_2_port
\c - - - :master_port
\a\t
SET citus.task_executor_type TO 'real-time';
SET citus.explain_distributed_queries TO on;
VACUUM ANALYZE lineitem_mx;
VACUUM ANALYZE orders_mx;
VACUUM ANALYZE customer_mx;
VACUUM ANALYZE supplier_mx;
\c - - - :worker_1_port
-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)
RETURNS jsonb
AS $BODY$
DECLARE
result jsonb;
BEGIN
EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
RETURN result;
END;
$BODY$ LANGUAGE plpgsql;
-- Function that parses explain output as XML
CREATE FUNCTION explain_xml(query text)
RETURNS xml
AS $BODY$
DECLARE
result xml;
BEGIN
EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
RETURN result;
END;
$BODY$ LANGUAGE plpgsql;
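-- a hedged usage sketch for the helpers above: each runs EXPLAIN in a structured
-- format and returns it through a typed variable, so selecting the result is
-- enough to validate that the format parses
SELECT explain_json($$ SELECT 1 $$) IS NOT NULL AS parses;
SELECT explain_xml($$ SELECT 1 $$) IS NOT NULL AS parses;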
\c - - - :worker_2_port
-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)
RETURNS jsonb
AS $BODY$
DECLARE
result jsonb;
BEGIN
EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
RETURN result;
END;
$BODY$ LANGUAGE plpgsql;
-- Function that parses explain output as XML
CREATE FUNCTION explain_xml(query text)
RETURNS xml
AS $BODY$
DECLARE
result xml;
BEGIN
EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
RETURN result;
END;
$BODY$ LANGUAGE plpgsql;
-- Test Text format
EXPLAIN (COSTS FALSE, FORMAT TEXT)
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity
-> HashAggregate
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Real-Time)
Task Count: 16
Tasks Shown: One of 16
-> Task
Node: host=localhost port=57637 dbname=regression
-> HashAggregate
Group Key: l_quantity
-> Seq Scan on lineitem_mx_1220052 lineitem_mx
-- Test JSON format
EXPLAIN (COSTS FALSE, FORMAT JSON)
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
[
{
"Plan": {
"Node Type": "Sort",
"Parallel Aware": false,
"Sort Key": ["COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Aggregate",
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Group Key": ["remote_scan.l_quantity"],
"Plans": [
{
"Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Real-Time",
"Parallel Aware": false,
"Distributed Query": {
"Job": {
"Task Count": 16,
"Tasks Shown": "One of 16",
"Tasks": [
{
"Node": "host=localhost port=57637 dbname=regression",
"Remote Plan": [
[
{
"Plan": {
"Node Type": "Aggregate",
"Strategy": "Hashed",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Group Key": ["l_quantity"],
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Relation Name": "lineitem_mx_1220052",
"Alias": "lineitem_mx"
}
]
}
}
]
]
}
]
}
}
}
]
}
]
}
}
]
-- Validate JSON format
SELECT true AS valid FROM explain_json($$
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
t
\c - - - :worker_1_port
-- Test XML format
EXPLAIN (COSTS FALSE, FORMAT XML)
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Sort</Node-Type>
<Parallel-Aware>false</Parallel-Aware>
<Sort-Key>
<Item>COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)</Item>
<Item>remote_scan.l_quantity</Item>
</Sort-Key>
<Plans>
<Plan>
<Node-Type>Aggregate</Node-Type>
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Group-Key>
<Item>remote_scan.l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Real-Time</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Distributed-Query>
<Job>
<Task-Count>16</Task-Count>
<Tasks-Shown>One of 16</Tasks-Shown>
<Tasks>
<Task>
<Node>host=localhost port=57637 dbname=regression</Node>
<Remote-Plan>
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Aggregate</Node-Type>
<Strategy>Hashed</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Group-Key>
<Item>l_quantity</Item>
</Group-Key>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Relation-Name>lineitem_mx_1220052</Relation-Name>
<Alias>lineitem_mx</Alias>
</Plan>
</Plans>
</Plan>
</Query>
</explain>
</Remote-Plan>
</Task>
</Tasks>
</Job>
</Distributed-Query>
</Plan>
</Plans>
</Plan>
</Plans>
</Plan>
</Query>
</explain>
-- Validate XML format
SELECT true AS valid FROM explain_xml($$
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
t
-- Test YAML format
EXPLAIN (COSTS FALSE, FORMAT YAML)
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
- Plan:
Node Type: "Sort"
Parallel Aware: false
Sort Key:
- "COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)"
- "remote_scan.l_quantity"
Plans:
- Node Type: "Aggregate"
Strategy: "Hashed"
Partial Mode: "Simple"
Parent Relationship: "Outer"
Parallel Aware: false
Group Key:
- "remote_scan.l_quantity"
Plans:
- Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Real-Time"
Parallel Aware: false
Distributed Query:
Job:
Task Count: 16
Tasks Shown: "One of 16"
Tasks:
- Node: "host=localhost port=57637 dbname=regression"
Remote Plan:
- Plan:
Node Type: "Aggregate"
Strategy: "Hashed"
Partial Mode: "Simple"
Parallel Aware: false
Group Key:
- "l_quantity"
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Relation Name: "lineitem_mx_1220052"
Alias: "lineitem_mx"
-- Test Text format
EXPLAIN (COSTS FALSE, FORMAT TEXT)
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity
-> HashAggregate
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Real-Time)
Task Count: 16
Tasks Shown: One of 16
-> Task
Node: host=localhost port=57637 dbname=regression
-> HashAggregate
Group Key: l_quantity
-> Seq Scan on lineitem_mx_1220052 lineitem_mx
\c - - - :worker_2_port
-- Test verbose
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx;
Aggregate
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
-> Custom Scan (Citus Real-Time)
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
Task Count: 16
Tasks Shown: One of 16
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
-> Seq Scan on public.lineitem_mx_1220052 lineitem_mx
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test join
EXPLAIN (COSTS FALSE)
SELECT * FROM lineitem_mx
JOIN orders_mx ON l_orderkey = o_orderkey AND l_quantity < 5.0
ORDER BY l_quantity LIMIT 10;
Limit
-> Sort
Sort Key: remote_scan.l_quantity
-> Custom Scan (Citus Real-Time)
Task Count: 16
Tasks Shown: One of 16
-> Task
Node: host=localhost port=57637 dbname=regression
-> Limit
-> Sort
Sort Key: lineitem_mx.l_quantity
-> Hash Join
Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey)
-> Seq Scan on lineitem_mx_1220052 lineitem_mx
Filter: (l_quantity < 5.0)
-> Hash
-> Seq Scan on orders_mx_1220068 orders_mx
-- Test insert
EXPLAIN (COSTS FALSE)
INSERT INTO lineitem_mx VALUES(1,0);
Custom Scan (Citus Router)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Insert on lineitem_mx_1220052
-> Result
-- Test update
EXPLAIN (COSTS FALSE)
UPDATE lineitem_mx
SET l_suppkey = 12
WHERE l_orderkey = 1 AND l_partkey = 0;
Custom Scan (Citus Router)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Update on lineitem_mx_1220052 lineitem_mx
-> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
Index Cond: (l_orderkey = 1)
Filter: (l_partkey = 0)
-- Test delete
EXPLAIN (COSTS FALSE)
DELETE FROM lineitem_mx
WHERE l_orderkey = 1 AND l_partkey = 0;
Custom Scan (Citus Router)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Delete on lineitem_mx_1220052 lineitem_mx
-> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
Index Cond: (l_orderkey = 1)
Filter: (l_partkey = 0)
-- make the outputs more consistent
VACUUM ANALYZE lineitem_mx;
VACUUM ANALYZE orders_mx;
VACUUM ANALYZE customer_mx;
VACUUM ANALYZE supplier_mx;
-- Test single-shard SELECT
EXPLAIN (COSTS FALSE)
SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5;
Custom Scan (Citus Router)
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=57638 dbname=regression
-> Index Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx
Index Cond: (l_orderkey = 5)
SELECT true AS valid FROM explain_xml($$
SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
t
SELECT true AS valid FROM explain_json($$
SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
t
-- Test CREATE TABLE ... AS
EXPLAIN (COSTS FALSE)
CREATE TABLE explain_result AS
SELECT * FROM lineitem_mx;
Custom Scan (Citus Real-Time)
Task Count: 16
Tasks Shown: One of 16
-> Task
Node: host=localhost port=57637 dbname=regression
-> Seq Scan on lineitem_mx_1220052 lineitem_mx
-- Test all tasks output
SET citus.explain_all_tasks TO on;
EXPLAIN (COSTS FALSE)
SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
Aggregate
-> Custom Scan (Citus Real-Time)
Task Count: 16
Tasks Shown: All
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220053 on lineitem_mx_1220053 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220054 on lineitem_mx_1220054 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220056 on lineitem_mx_1220056 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220057 on lineitem_mx_1220057 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220058 on lineitem_mx_1220058 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220059 on lineitem_mx_1220059 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220060 on lineitem_mx_1220060 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220061 on lineitem_mx_1220061 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220062 on lineitem_mx_1220062 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220063 on lineitem_mx_1220063 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220064 on lineitem_mx_1220064 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Seq Scan on lineitem_mx_1220065 lineitem_mx
Filter: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220066 on lineitem_mx_1220066 lineitem_mx
Index Cond: (l_orderkey > 9030)
-> Task
Node: host=localhost port=57638 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220067 on lineitem_mx_1220067 lineitem_mx
Index Cond: (l_orderkey > 9030)
SELECT true AS valid FROM explain_xml($$
SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
t
SELECT true AS valid FROM explain_json($$
SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
t
-- Test task tracker
SET citus.task_executor_type TO 'task-tracker';
SET citus.explain_all_tasks TO off;
EXPLAIN (COSTS FALSE)
SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
Aggregate
-> Custom Scan (Citus Task-Tracker)
Task Count: 16
Tasks Shown: One of 16
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx
Index Cond: (l_orderkey > 9030)
-- Test re-partition join
EXPLAIN (COSTS FALSE)
SELECT count(*)
FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey;
Aggregate
-> Custom Scan (Citus Task-Tracker)
Task Count: 16
Tasks Shown: One of 16
-> Task
Node: host=localhost port=57637 dbname=regression
-> Aggregate
-> Hash Join
Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey)
-> Hash Join
Hash Cond: (supplier_mx.s_suppkey = lineitem_mx.l_suppkey)
-> Seq Scan on supplier_mx_1220087 supplier_mx
-> Hash
-> Seq Scan on lineitem_mx_1220052 lineitem_mx
-> Hash
-> Hash Join
Hash Cond: (customer_mx.c_custkey = orders_mx.o_custkey)
-> Seq Scan on customer_mx_1220084 customer_mx
-> Hash
-> Seq Scan on orders_mx_1220068 orders_mx
EXPLAIN (COSTS FALSE, FORMAT JSON)
SELECT count(*)
FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey;
[
{
"Plan": {
"Node Type": "Aggregate",
"Strategy": "Plain",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Plans": [
{
"Node Type": "Custom Scan",
"Parent Relationship": "Outer",
"Custom Plan Provider": "Citus Task-Tracker",
"Parallel Aware": false,
"Distributed Query": {
"Job": {
"Task Count": 16,
"Tasks Shown": "One of 16",
"Tasks": [
{
"Node": "host=localhost port=57637 dbname=regression",
"Remote Plan": [
[
{
"Plan": {
"Node Type": "Aggregate",
"Strategy": "Plain",
"Partial Mode": "Simple",
"Parallel Aware": false,
"Plans": [
{
"Node Type": "Hash Join",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Join Type": "Inner",
"Hash Cond": "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)",
"Plans": [
{
"Node Type": "Hash Join",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Join Type": "Inner",
"Hash Cond": "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)",
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Relation Name": "supplier_mx_1220087",
"Alias": "supplier_mx"
},
{
"Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false,
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Relation Name": "lineitem_mx_1220052",
"Alias": "lineitem_mx"
}
]
}
]
},
{
"Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false,
"Plans": [
{
"Node Type": "Hash Join",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Join Type": "Inner",
"Hash Cond": "(customer_mx.c_custkey = orders_mx.o_custkey)",
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Relation Name": "customer_mx_1220084",
"Alias": "customer_mx"
},
{
"Node Type": "Hash",
"Parent Relationship": "Inner",
"Parallel Aware": false,
"Plans": [
{
"Node Type": "Seq Scan",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Relation Name": "orders_mx_1220068",
"Alias": "orders_mx"
}
]
}
]
}
]
}
]
}
]
}
}
]
]
}
]
}
}
}
]
}
}
]
SELECT true AS valid FROM explain_json($$
SELECT count(*)
FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey$$);
t
EXPLAIN (COSTS FALSE, FORMAT XML)
SELECT count(*)
FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey;
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Aggregate</Node-Type>
<Strategy>Plain</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Plans>
<Plan>
<Node-Type>Custom Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Custom-Plan-Provider>Citus Task-Tracker</Custom-Plan-Provider>
<Parallel-Aware>false</Parallel-Aware>
<Distributed-Query>
<Job>
<Task-Count>16</Task-Count>
<Tasks-Shown>One of 16</Tasks-Shown>
<Tasks>
<Task>
<Node>host=localhost port=57637 dbname=regression</Node>
<Remote-Plan>
<explain xmlns="http://www.postgresql.org/2009/explain">
<Query>
<Plan>
<Node-Type>Aggregate</Node-Type>
<Strategy>Plain</Strategy>
<Partial-Mode>Simple</Partial-Mode>
<Parallel-Aware>false</Parallel-Aware>
<Plans>
<Plan>
<Node-Type>Hash Join</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Join-Type>Inner</Join-Type>
<Hash-Cond>(lineitem_mx.l_orderkey = orders_mx.o_orderkey)</Hash-Cond>
<Plans>
<Plan>
<Node-Type>Hash Join</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Join-Type>Inner</Join-Type>
<Hash-Cond>(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)</Hash-Cond>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Relation-Name>supplier_mx_1220087</Relation-Name>
<Alias>supplier_mx</Alias>
</Plan>
<Plan>
<Node-Type>Hash</Node-Type>
<Parent-Relationship>Inner</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Relation-Name>lineitem_mx_1220052</Relation-Name>
<Alias>lineitem_mx</Alias>
</Plan>
</Plans>
</Plan>
</Plans>
</Plan>
<Plan>
<Node-Type>Hash</Node-Type>
<Parent-Relationship>Inner</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Plans>
<Plan>
<Node-Type>Hash Join</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Join-Type>Inner</Join-Type>
<Hash-Cond>(customer_mx.c_custkey = orders_mx.o_custkey)</Hash-Cond>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Relation-Name>customer_mx_1220084</Relation-Name>
<Alias>customer_mx</Alias>
</Plan>
<Plan>
<Node-Type>Hash</Node-Type>
<Parent-Relationship>Inner</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Plans>
<Plan>
<Node-Type>Seq Scan</Node-Type>
<Parent-Relationship>Outer</Parent-Relationship>
<Parallel-Aware>false</Parallel-Aware>
<Relation-Name>orders_mx_1220068</Relation-Name>
<Alias>orders_mx</Alias>
</Plan>
</Plans>
</Plan>
</Plans>
</Plan>
</Plans>
</Plan>
</Plans>
</Plan>
</Plans>
</Plan>
</Query>
</explain>
</Remote-Plan>
</Task>
</Tasks>
</Job>
</Distributed-Query>
</Plan>
</Plans>
</Plan>
</Query>
</explain>
SELECT true AS valid FROM explain_xml($$
SELECT count(*)
FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey$$);
t
EXPLAIN (COSTS FALSE, FORMAT YAML)
SELECT count(*)
FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
WHERE l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey;
- Plan:
Node Type: "Aggregate"
Strategy: "Plain"
Partial Mode: "Simple"
Parallel Aware: false
Plans:
- Node Type: "Custom Scan"
Parent Relationship: "Outer"
Custom Plan Provider: "Citus Task-Tracker"
Parallel Aware: false
Distributed Query:
Job:
Task Count: 16
Tasks Shown: "One of 16"
Tasks:
- Node: "host=localhost port=57637 dbname=regression"
Remote Plan:
- Plan:
Node Type: "Aggregate"
Strategy: "Plain"
Partial Mode: "Simple"
Parallel Aware: false
Plans:
- Node Type: "Hash Join"
Parent Relationship: "Outer"
Parallel Aware: false
Join Type: "Inner"
Hash Cond: "(lineitem_mx.l_orderkey = orders_mx.o_orderkey)"
Plans:
- Node Type: "Hash Join"
Parent Relationship: "Outer"
Parallel Aware: false
Join Type: "Inner"
Hash Cond: "(supplier_mx.s_suppkey = lineitem_mx.l_suppkey)"
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Relation Name: "supplier_mx_1220087"
Alias: "supplier_mx"
- Node Type: "Hash"
Parent Relationship: "Inner"
Parallel Aware: false
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Relation Name: "lineitem_mx_1220052"
Alias: "lineitem_mx"
- Node Type: "Hash"
Parent Relationship: "Inner"
Parallel Aware: false
Plans:
- Node Type: "Hash Join"
Parent Relationship: "Outer"
Parallel Aware: false
Join Type: "Inner"
Hash Cond: "(customer_mx.c_custkey = orders_mx.o_custkey)"
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Relation Name: "customer_mx_1220084"
Alias: "customer_mx"
- Node Type: "Hash"
Parent Relationship: "Inner"
Parallel Aware: false
Plans:
- Node Type: "Seq Scan"
Parent Relationship: "Outer"
Parallel Aware: false
Relation Name: "orders_mx_1220068"
Alias: "orders_mx"

View File

@ -1,270 +0,0 @@
--
-- Distributed Partitioned Table MX Tests
--
SET citus.next_shard_id TO 1700000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
-- make sure we can create partitioned tables in MX
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
-----------------------------
(1 row)
-- 1-) Distributing partitioned table
-- create partitioned table
CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time);
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test(id int, time date) PARTITION ...
^
-- create its partitions
CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test_2009 PARTITION OF partitionin...
^
CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test_2010 PARTITION OF partitionin...
^
-- load some data and distribute tables
INSERT INTO partitioning_test VALUES (1, '2009-06-06');
ERROR: relation "partitioning_test" does not exist
LINE 1: INSERT INTO partitioning_test VALUES (1, '2009-06-06');
^
INSERT INTO partitioning_test VALUES (2, '2010-07-07');
ERROR: relation "partitioning_test" does not exist
LINE 1: INSERT INTO partitioning_test VALUES (2, '2010-07-07');
^
INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09');
ERROR: relation "partitioning_test_2009" does not exist
LINE 1: INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09');
^
INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03');
ERROR: relation "partitioning_test_2010" does not exist
LINE 1: INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03');
^
-- distribute partitioned table
SELECT create_distributed_table('partitioning_test', 'id');
ERROR: relation "partitioning_test" does not exist
LINE 1: SELECT create_distributed_table('partitioning_test', 'id');
^
-- see from MX node, the data is loaded to shards
\c - - - :worker_1_port
SELECT * FROM partitioning_test ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 1: SELECT * FROM partitioning_test ORDER BY 1;
^
-- see from MX node, partitioned table and its partitions are distributed
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010')
ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 6: logicalrelid IN ('partitioning_test', 'partitioning_test_20...
^
SELECT
logicalrelid, count(*)
FROM pg_dist_shard
WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010')
GROUP BY
logicalrelid
ORDER BY
1,2;
ERROR: relation "partitioning_test" does not exist
LINE 4: WHERE logicalrelid IN ('partitioning_test', 'partitioning_t...
^
-- see from MX node, partitioning hierarchy is built
SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 1: ...elid::regclass FROM pg_inherits WHERE inhparent = 'partition...
^
\c - - - :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
-- 2-) Creating partition of a distributed table
CREATE TABLE partitioning_test_2011 PARTITION OF partitioning_test FOR VALUES FROM ('2011-01-01') TO ('2012-01-01');
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partitioning_test_2011 PARTITION OF partitionin...
^
-- see from MX node, new partition is automatically distributed as well
\c - - - :worker_1_port
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_test', 'partitioning_test_2011')
ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 6: logicalrelid IN ('partitioning_test', 'partitioning_test_20...
^
SELECT
logicalrelid, count(*)
FROM pg_dist_shard
WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011')
GROUP BY
logicalrelid
ORDER BY
1,2;
ERROR: relation "partitioning_test" does not exist
LINE 4: WHERE logicalrelid IN ('partitioning_test', 'partitioning_t...
^
-- see from MX node, partitioning hierarchy is built
SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 1: ...elid::regclass FROM pg_inherits WHERE inhparent = 'partition...
^
\c - - - :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
-- 3-) Attaching non distributed table to a distributed table
CREATE TABLE partitioning_test_2012(id int, time date);
-- load some data
INSERT INTO partitioning_test_2012 VALUES (5, '2012-06-06');
INSERT INTO partitioning_test_2012 VALUES (6, '2012-07-07');
ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2012 FOR VALUES FROM ('2012-01-01') TO ('2013-01-01');
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_...
^
-- see from MX node, attached partition is distributed as well
\c - - - :worker_1_port
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_test', 'partitioning_test_2012')
ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 6: logicalrelid IN ('partitioning_test', 'partitioning_test_20...
^
SELECT
logicalrelid, count(*)
FROM pg_dist_shard
WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012')
GROUP BY
logicalrelid
ORDER BY
1,2;
ERROR: relation "partitioning_test" does not exist
LINE 4: WHERE logicalrelid IN ('partitioning_test', 'partitioning_t...
^
-- see from MX node, see the data is loaded to shards
SELECT * FROM partitioning_test ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 1: SELECT * FROM partitioning_test ORDER BY 1;
^
-- see from MX node, partitioning hierarchy is built
SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 1: ...elid::regclass FROM pg_inherits WHERE inhparent = 'partition...
^
\c - - - :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
-- 4-) Attaching distributed table to distributed table
CREATE TABLE partitioning_test_2013(id int, time date);
SELECT create_distributed_table('partitioning_test_2013', 'id');
create_distributed_table
--------------------------
(1 row)
-- load some data
INSERT INTO partitioning_test_2013 VALUES (7, '2013-06-06');
INSERT INTO partitioning_test_2013 VALUES (8, '2013-07-07');
ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES FROM ('2013-01-01') TO ('2014-01-01');
ERROR: syntax error at or near "ATTACH"
LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_...
^
-- see from MX node, see the data is loaded to shards
\c - - - :worker_1_port
SELECT * FROM partitioning_test ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 1: SELECT * FROM partitioning_test ORDER BY 1;
^
-- see from MX node, partitioning hierarchy is built
SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 1: ...elid::regclass FROM pg_inherits WHERE inhparent = 'partition...
^
\c - - - :master_port
-- 5-) Detaching partition of the partitioned table
ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009;
ERROR: syntax error at or near "DETACH"
LINE 1: ALTER TABLE partitioning_test DETACH PARTITION partitioning_...
^
-- see from MX node, partitioning hierarchy is built
\c - - - :worker_1_port
SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;
ERROR: relation "partitioning_test" does not exist
LINE 1: ...elid::regclass FROM pg_inherits WHERE inhparent = 'partition...
^
-- make sure DROPping from worker node is not allowed
DROP TABLE partitioning_test;
ERROR: table "partitioning_test" does not exist
\c - - - :master_port
-- make sure we can repeatedly call start_metadata_sync_to_node
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
-----------------------------
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
-----------------------------
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
-----------------------------
(1 row)
-- make sure we can drop partitions
DROP TABLE partitioning_test_2009;
ERROR: table "partitioning_test_2009" does not exist
DROP TABLE partitioning_test_2010;
ERROR: table "partitioning_test_2010" does not exist
-- make sure we can drop partitioned table
DROP TABLE partitioning_test;
ERROR: table "partitioning_test" does not exist
DROP TABLE IF EXISTS partitioning_test_2013;
-- test schema drop with partitioned tables
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
CREATE SCHEMA partition_test;
SET SEARCH_PATH TO partition_test;
CREATE TABLE partition_parent_table(a int, b int, c int) PARTITION BY RANGE (b);
ERROR: syntax error at or near "PARTITION"
LINE 1: ...TABLE partition_parent_table(a int, b int, c int) PARTITION ...
^
SELECT create_distributed_table('partition_parent_table', 'a');
ERROR: relation "partition_parent_table" does not exist
LINE 1: SELECT create_distributed_table('partition_parent_table', 'a...
^
CREATE TABLE partition_0 PARTITION OF partition_parent_table FOR VALUES FROM (1) TO (10);
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partition_0 PARTITION OF partition_parent_table...
^
CREATE TABLE partition_1 PARTITION OF partition_parent_table FOR VALUES FROM (10) TO (20);
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partition_1 PARTITION OF partition_parent_table...
^
CREATE TABLE partition_2 PARTITION OF partition_parent_table FOR VALUES FROM (20) TO (30);
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partition_2 PARTITION OF partition_parent_table...
^
CREATE TABLE partition_3 PARTITION OF partition_parent_table FOR VALUES FROM (30) TO (40);
ERROR: syntax error at or near "PARTITION"
LINE 1: CREATE TABLE partition_3 PARTITION OF partition_parent_table...
^
DROP SCHEMA partition_test CASCADE;
RESET SEARCH_PATH;

View File

@ -4,13 +4,6 @@
SET citus.next_shard_id TO 1660000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS server_version_above_ten;
server_version_above_ten
--------------------------
t
(1 row)
--
-- Distributed Partitioned Table Creation Tests
--

File diff suppressed because it is too large

View File

@ -4,13 +4,6 @@
SET citus.next_shard_id TO 1660000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS server_version_above_ten;
server_version_above_ten
--------------------------
t
(1 row)
--
-- Distributed Partitioned Table Creation Tests
--

View File

@ -1,11 +1,3 @@
-- This test has different output per major version
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
(1 row)
-- ===================================================================
-- create test functions
-- ===================================================================

View File

@ -1,431 +0,0 @@
-- This test has different output per major version
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
f
(1 row)
-- ===================================================================
-- create test functions
-- ===================================================================
CREATE FUNCTION generate_alter_table_detach_partition_command(regclass)
RETURNS text
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION generate_alter_table_attach_partition_command(regclass)
RETURNS text
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION generate_partition_information(regclass)
RETURNS text
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION print_partitions(regclass)
RETURNS text
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION table_inherits(regclass)
RETURNS bool
AS 'citus'
LANGUAGE C STRICT;
CREATE FUNCTION table_inherited(regclass)
RETURNS bool
AS 'citus'
LANGUAGE C STRICT;
CREATE OR REPLACE FUNCTION detach_and_attach_partition(partition_name regclass, parent_table_name regclass)
RETURNS void LANGUAGE plpgsql VOLATILE
AS $function$
DECLARE
detach_partition_command text := '';
attach_partition_command text := '';
command_result text := '';
BEGIN
-- first generate the command
SELECT public.generate_alter_table_attach_partition_command(partition_name) INTO attach_partition_command;
-- now genereate the detach command
SELECT public.generate_alter_table_detach_partition_command(partition_name) INTO detach_partition_command;
-- later detach the same partition
EXECUTE detach_partition_command;
-- now attach it again
EXECUTE attach_partition_command;
END;
$function$;
CREATE OR REPLACE FUNCTION drop_and_recreate_partitioned_table(parent_table_name regclass)
RETURNS void LANGUAGE plpgsql VOLATILE
AS $function$
DECLARE
command text := '';
BEGIN
-- first generate the command
CREATE TABLE partitioned_table_create_commands AS SELECT master_get_table_ddl_events(parent_table_name::text);
-- then drop the table
EXECUTE 'DROP TABLE ' || parent_table_name::text || ';';
FOR command IN SELECT * FROM partitioned_table_create_commands
LOOP
-- can do some processing here
EXECUTE command;
END LOOP;
DROP TABLE partitioned_table_create_commands;
END;
$function$;
-- create a partitioned table
CREATE TABLE date_partitioned_table(id int, time date) PARTITION BY RANGE (time);
-- we should be able to get the partitioning information even if there are no partitions
SELECT generate_partition_information('date_partitioned_table');
generate_partition_information
--------------------------------
RANGE ("time")
(1 row)
-- we should be able to drop and re-create the partitioned table using the command that Citus generates
SELECT drop_and_recreate_partitioned_table('date_partitioned_table');
drop_and_recreate_partitioned_table
-------------------------------------
(1 row)
-- we should also be able to see the PARTITION BY ... for the parent table
SELECT master_get_table_ddl_events('date_partitioned_table');
master_get_table_ddl_events
---------------------------------------------------------------------------------------------------
CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time")
ALTER TABLE public.date_partitioned_table OWNER TO postgres
(2 rows)
-- now create the partitions
CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES FROM ('2006-01-01') TO ('2007-01-01');
CREATE TABLE date_partition_2007 PARTITION OF date_partitioned_table FOR VALUES FROM ('2007-01-01') TO ('2008-01-01');
-- we should be able to get the partitioning information after the partitions are created
SELECT generate_partition_information('date_partitioned_table');
generate_partition_information
--------------------------------
RANGE ("time")
(1 row)
-- let's get the attach partition commands
SELECT generate_alter_table_attach_partition_command('date_partition_2006');
generate_alter_table_attach_partition_command
-----------------------------------------------------------------------------------------------------------------------------------------
ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007');
(1 row)
SELECT generate_alter_table_attach_partition_command('date_partition_2007');
generate_alter_table_attach_partition_command
-----------------------------------------------------------------------------------------------------------------------------------------
ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008');
(1 row)
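-- an editorial aside (inferred from the outputs in this file, not part of the
-- test itself): the bounds come back as '01-01-2006' rather than '2006-01-01'
-- because deparsing renders date literals using the session's DateStyle, and
-- the regression harness runs with DateStyle set to 'Postgres, MDY' (the same
-- setting that makes timestamps print as 'Sat Apr 05 08:32:12 2014 PDT' below)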
-- detach and attach the partition using the commands we generated
\d+ date_partitioned_table
Table "public.date_partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
--------+---------+-----------+----------+---------+---------+--------------+-------------
id | integer | | | | plain | |
time | date | | | | plain | |
Partition key: RANGE ("time")
Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'),
date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008')
SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_table');
detach_and_attach_partition
-----------------------------
(1 row)
-- check that both partitions are visible
\d+ date_partitioned_table
Table "public.date_partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
--------+---------+-----------+----------+---------+---------+--------------+-------------
id | integer | | | | plain | |
time | date | | | | plain | |
Partition key: RANGE ("time")
Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'),
date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008')
-- make sure that inter-shard commands work as expected
-- assume that the shardId is 100
CREATE TABLE date_partitioned_table_100 (id int, time date) PARTITION BY RANGE (time);
CREATE TABLE date_partition_2007_100 (id int, time date );
-- now create the partitioning hierarchy
SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
referenced_shard:=100, referenced_schema_name:='public',
command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' );
worker_apply_inter_shard_ddl_command
--------------------------------------
(1 row)
-- the hierarchy is successfully created
\d+ date_partitioned_table_100
Table "public.date_partitioned_table_100"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
--------+---------+-----------+----------+---------+---------+--------------+-------------
id | integer | | | | plain | |
time | date | | | | plain | |
Partition key: RANGE ("time")
Partitions: date_partition_2007_100 FOR VALUES FROM ('01-01-2007') TO ('01-02-2008')
-- Citus can also get the DDL events for the partitions as regular tables
SELECT master_get_table_ddl_events('date_partition_2007_100');
master_get_table_ddl_events
-----------------------------------------------------------------------
CREATE TABLE public.date_partition_2007_100 (id integer, "time" date)
ALTER TABLE public.date_partition_2007_100 OWNER TO postgres
(2 rows)
-- now break the partitioning hierarchy
SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
referenced_shard:=100, referenced_schema_name:='public',
command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' );
worker_apply_inter_shard_ddl_command
--------------------------------------
(1 row)
-- the hierarchy is successfully broken
\d+ date_partitioned_table_100
Table "public.date_partitioned_table_100"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
--------+---------+-----------+----------+---------+---------+--------------+-------------
id | integer | | | | plain | |
time | date | | | | plain | |
Partition key: RANGE ("time")
-- now let's have some more complex partitioning hierarchies with
-- tables on different schemas and constraints on the tables
CREATE SCHEMA partition_parent_schema;
CREATE TABLE partition_parent_schema.parent_table (id int NOT NULL, time date DEFAULT now()) PARTITION BY RANGE (time);
CREATE SCHEMA partition_child_1_schema;
CREATE TABLE partition_child_1_schema.child_1 (id int NOT NULL, time date );
CREATE SCHEMA partition_child_2_schema;
CREATE TABLE partition_child_2_schema.child_2 (id int NOT NULL, time date );
-- we should be able to get the partitioning information even if there are no partitions
SELECT generate_partition_information('partition_parent_schema.parent_table');
generate_partition_information
--------------------------------
RANGE ("time")
(1 row)
-- we should be able to drop and re-create the partitioned table using the command that Citus generates
SELECT drop_and_recreate_partitioned_table('partition_parent_schema.parent_table');
drop_and_recreate_partitioned_table
-------------------------------------
(1 row)
ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('2009-01-01') TO ('2010-01-02');
SET search_path = 'partition_parent_schema';
ALTER TABLE parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('2006-01-01') TO ('2007-01-01');
SELECT public.generate_partition_information('parent_table');
generate_partition_information
--------------------------------
RANGE ("time")
(1 row)
-- let's get the attach partition commands
SELECT public.generate_alter_table_attach_partition_command('partition_child_1_schema.child_1');
generate_alter_table_attach_partition_command
------------------------------------------------------------------------------------------------------------------------------------------------------
ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010');
(1 row)
SET search_path = 'partition_child_2_schema';
SELECT public.generate_alter_table_attach_partition_command('child_2');
generate_alter_table_attach_partition_command
------------------------------------------------------------------------------------------------------------------------------------------------------
ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007');
(1 row)
SET search_path = 'partition_parent_schema';
-- detach and attach the partition using the commands we generated
\d+ parent_table
Table "partition_parent_schema.parent_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
--------+---------+-----------+----------+---------+---------+--------------+-------------
id | integer | | not null | | plain | |
time | date | | | now() | plain | |
Partition key: RANGE ("time")
Partitions: partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'),
partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007')
SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'parent_table');
detach_and_attach_partition
-----------------------------
(1 row)
-- check that both partitions are visible
\d+ parent_table
Table "partition_parent_schema.parent_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
--------+---------+-----------+----------+---------+---------+--------------+-------------
id | integer | | not null | | plain | |
time | date | | | now() | plain | |
Partition key: RANGE ("time")
Partitions: partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'),
partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007')
-- some very simple checks that should error out
SELECT public.generate_alter_table_attach_partition_command('parent_table');
ERROR: "parent_table" is not a partition
SELECT public.generate_partition_information('partition_child_1_schema.child_1');
ERROR: "child_1" is not a parent table
SELECT public.print_partitions('partition_child_1_schema.child_1');
ERROR: "child_1" is not a parent table
-- now print the partitions
SELECT public.print_partitions('parent_table');
print_partitions
------------------
child_1,child_2
(1 row)
SET search_path = 'public';
-- test multi column / expression partitioning with open-ended (MAXVALUE) ranges
CREATE OR REPLACE FUNCTION some_function(input_val text)
RETURNS text LANGUAGE plpgsql IMMUTABLE
AS $function$
BEGIN
return reverse(input_val);
END;
$function$;
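-- a hedged aside (not in the original test): expression partition keys may only
-- call IMMUTABLE functions, which is why some_function above is declared
-- IMMUTABLE; a rough sketch of what a non-immutable key would produce
-- (hypothetical table name, approximate error text):
--   CREATE TABLE bad_partitioned (a int, b timestamptz)
--     PARTITION BY RANGE ((b - now()));
--   ERROR:  functions in partition key expression must be marked IMMUTABLE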
CREATE TABLE multi_column_partitioned (
a int,
b int,
c text
) PARTITION BY RANGE (a, (a+b+1), some_function(upper(c)));
CREATE TABLE multi_column_partition_1(
a int,
b int,
c text
);
CREATE TABLE multi_column_partition_2(
a int,
b int,
c text
);
-- partitioning information
SELECT generate_partition_information('multi_column_partitioned');
generate_partition_information
-----------------------------------------------------
RANGE (a, (((a + b) + 1)), some_function(upper(c)))
(1 row)
SELECT master_get_table_ddl_events('multi_column_partitioned');
master_get_table_ddl_events
------------------------------------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c)))
ALTER TABLE public.multi_column_partitioned OWNER TO postgres
(2 rows)
SELECT drop_and_recreate_partitioned_table('multi_column_partitioned');
drop_and_recreate_partitioned_table
-------------------------------------
(1 row)
-- partitions and their ranges
ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250');
SELECT generate_alter_table_attach_partition_command('multi_column_partition_1');
generate_alter_table_attach_partition_command
------------------------------------------------------------------------------------------------------------------------------------------------
ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250');
(1 row)
ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE);
SELECT generate_alter_table_attach_partition_command('multi_column_partition_2');
generate_alter_table_attach_partition_command
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE);
(1 row)
SELECT generate_alter_table_detach_partition_command('multi_column_partition_2');
generate_alter_table_detach_partition_command
---------------------------------------------------------------------------------------------------------
ALTER TABLE IF EXISTS public.multi_column_partitioned DETACH PARTITION public.multi_column_partition_2;
(1 row)
-- finally a test with LIST partitioning
CREATE TABLE list_partitioned (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION BY LIST (col1) ;
SELECT generate_partition_information('list_partitioned');
generate_partition_information
--------------------------------
LIST (col1)
(1 row)
SELECT master_get_table_ddl_events('list_partitioned');
master_get_table_ddl_events
-------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1)
ALTER TABLE public.list_partitioned OWNER TO postgres
(2 rows)
SELECT drop_and_recreate_partitioned_table('list_partitioned');
drop_and_recreate_partitioned_table
-------------------------------------
(1 row)
CREATE TABLE list_partitioned_1 PARTITION OF list_partitioned FOR VALUES IN (100, 101, 102, 103, 104);
SELECT generate_alter_table_attach_partition_command('list_partitioned_1');
generate_alter_table_attach_partition_command
-----------------------------------------------------------------------------------------------------------------------------------
ALTER TABLE public.list_partitioned ATTACH PARTITION public.list_partitioned_1 FOR VALUES IN ('100', '101', '102', '103', '104');
(1 row)
-- also differentiate partitions and inherited tables
CREATE TABLE cities (
name text,
population float,
altitude int -- in feet
);
CREATE TABLE capitals (
state char(2)
) INHERITS (cities);
-- returns true since capitals inherits from cities
SELECT table_inherits('capitals');
table_inherits
----------------
t
(1 row)
-- although date_partition_2006 inherits from its parent
-- returns false since the hierarchy is formed via partitioning
SELECT table_inherits('date_partition_2006');
table_inherits
----------------
f
(1 row)
-- returns true since cities is inherited by capitals
SELECT table_inherited('cities');
table_inherited
-----------------
t
(1 row)
-- although date_partitioned_table is inherited by its partitions
-- returns false since the hierarchy is formed via partitioning
SELECT table_inherited('date_partitioned_table');
table_inherited
-----------------
f
(1 row)
-- also these are not supported
SELECT master_get_table_ddl_events('capitals');
ERROR: capitals is not a regular, foreign or partitioned table
SELECT master_get_table_ddl_events('cities');
ERROR: cities is not a regular, foreign or partitioned table
-- dropping parents drops the partitions
DROP TABLE date_partitioned_table, multi_column_partitioned, list_partitioned, partition_parent_schema.parent_table, cities, capitals;
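-- a minimal follow-up sketch (not part of the original test run): dropping a
-- partitioned parent cascades to its partitions, which can be verified from
-- the catalogs; an empty result would confirm no partition survived the DROP
--   SELECT c.relname FROM pg_class c WHERE c.relispartition;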


@ -7,14 +7,6 @@
-- executor here, as we cannot run repartition jobs with the real-time executor.
SET citus.next_shard_id TO 690000;
SET citus.enable_unique_job_ids TO off;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
version_above_nine
--------------------
t
(1 row)
BEGIN;
SET client_min_messages TO DEBUG4;
SET citus.task_executor_type TO 'task-tracker';


@ -6,14 +6,6 @@
-- from a sql task to its dependent tasks. Note that we set the executor type to task
-- tracker executor here, as we cannot run repartition jobs with the real-time executor.
SET citus.next_shard_id TO 710000;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
version_above_nine
--------------------
t
(1 row)
BEGIN;
SET client_min_messages TO DEBUG3;
SET citus.task_executor_type TO 'task-tracker';


@ -1,922 +0,0 @@
--
-- multi shard update delete
-- this file is intended to test multi shard update/delete queries
--
SET citus.next_shard_id TO 1440000;
SET citus.shard_replication_factor to 1;
SET citus.multi_shard_modify_mode to 'parallel';
CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int);
SELECT create_distributed_table('users_test_table', 'user_id');
create_distributed_table
--------------------------
(1 row)
\COPY users_test_table FROM STDIN DELIMITER AS ',';
CREATE TABLE events_test_table (user_id int, value_1 int, value_2 int, value_3 int);
SELECT create_distributed_table('events_test_table', 'user_id');
create_distributed_table
--------------------------
(1 row)
\COPY events_test_table FROM STDIN DELIMITER AS ',';
CREATE TABLE events_reference_copy_table (like events_test_table);
SELECT create_reference_table('events_reference_copy_table');
create_reference_table
------------------------
(1 row)
INSERT INTO events_reference_copy_table SELECT * FROM events_test_table;
CREATE TABLE users_reference_copy_table (like users_test_table);
SELECT create_reference_table('users_reference_copy_table');
create_reference_table
------------------------
(1 row)
INSERT INTO users_reference_copy_table SELECT * FROM users_test_table;
-- Run multi shard updates and deletes without transaction on hash distributed tables
UPDATE users_test_table SET value_1 = 1;
SELECT COUNT(*), SUM(value_1) FROM users_test_table;
count | sum
-------+-----
15 | 15
(1 row)
SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3;
count | sum
-------+-----
4 | 52
(1 row)
UPDATE users_test_table SET value_2 = value_2 + 1 WHERE user_id = 1 or user_id = 3;
SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3;
count | sum
-------+-----
4 | 56
(1 row)
UPDATE users_test_table SET value_3 = 0 WHERE user_id <> 5;
SELECT SUM(value_3) FROM users_test_table WHERE user_id <> 5;
sum
-----
0
(1 row)
SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5;
count
-------
4
(1 row)
DELETE FROM users_test_table WHERE user_id = 3 or user_id = 5;
SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5;
count
-------
0
(1 row)
-- Run multi shard update delete queries within transactions
BEGIN;
UPDATE users_test_table SET value_3 = 0;
END;
SELECT SUM(value_3) FROM users_test_table;
sum
-----
0
(1 row)
-- Updates can also be rolled back
BEGIN;
UPDATE users_test_table SET value_3 = 1;
ROLLBACK;
SELECT SUM(value_3) FROM users_test_table;
sum
-----
0
(1 row)
-- Run with inserts (we need to set citus.multi_shard_modify_mode to sequential)
BEGIN;
INSERT INTO users_test_table (user_id, value_3) VALUES(20, 15);
INSERT INTO users_test_table (user_id, value_3) VALUES(16,1), (20,16), (7,1), (20,17);
SET citus.multi_shard_modify_mode to sequential;
UPDATE users_test_table SET value_3 = 1;
END;
SELECT COUNT()SUM(value_3) FROM users_test_table;
ERROR: syntax error at or near "("
LINE 1: SELECT COUNT()SUM(value_3) FROM users_test_table;
^
SET citus.multi_shard_modify_mode to 'sequential';
-- Run multiple multi shard updates (with sequential executor)
BEGIN;
UPDATE users_test_table SET value_3 = 5;
UPDATE users_test_table SET value_3 = 0;
END;
SELECT SUM(value_3) FROM users_copy_table;
ERROR: relation "users_copy_table" does not exist
LINE 1: SELECT SUM(value_3) FROM users_copy_table;
^
-- Run multiple multi shard updates (with parallel executor)
SET citus.multi_shard_modify_mode to 'parallel';
UPDATE users_test_table SET value_3 = 5;
BEGIN;
UPDATE users_test_table SET value_3 = 2;
UPDATE users_test_table SET value_3 = 0;
END;
SELECT SUM(value_3) FROM users_test_table;
sum
-----
0
(1 row)
-- Check with different kinds of constraints
UPDATE users_test_table SET value_3 = 1 WHERE user_id = 3 or true;
SELECT COUNT(*), SUM(value_3) FROM users_test_table;
count | sum
-------+-----
16 | 16
(1 row)
UPDATE users_test_table SET value_3 = 0 WHERE user_id = 20 and false;
SELECT COUNT(*), SUM(value_3) FROM users_test_table;
count | sum
-------+-----
16 | 16
(1 row)
-- Run multi shard updates with prepared statements
PREPARE foo_plan(int,int) AS UPDATE users_test_table SET value_1 = $1, value_3 = $2;
EXECUTE foo_plan(1,5);
EXECUTE foo_plan(3,15);
EXECUTE foo_plan(5,25);
EXECUTE foo_plan(7,35);
EXECUTE foo_plan(9,45);
EXECUTE foo_plan(0,0);
SELECT SUM(value_1), SUM(value_3) FROM users_test_table;
sum | sum
-----+-----
0 | 0
(1 row)
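-- an explanatory aside (assumption about the test's intent, not from the diff):
-- the prepared statement is executed six times because PostgreSQL switches to
-- a cached generic plan after five custom-plan executions, so the final
-- EXECUTE also exercises the generic-plan code path through Citus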
-- Test on append table (set executor mode to sequential, since with append-
-- distributed tables the parallel executor may create tons of connections)
SET citus.multi_shard_modify_mode to sequential;
CREATE TABLE append_stage_table(id int, col_2 int);
INSERT INTO append_stage_table VALUES(1,3);
INSERT INTO append_stage_table VALUES(3,2);
INSERT INTO append_stage_table VALUES(5,4);
CREATE TABLE append_stage_table_2(id int, col_2 int);
INSERT INTO append_stage_table_2 VALUES(8,3);
INSERT INTO append_stage_table_2 VALUES(9,2);
INSERT INTO append_stage_table_2 VALUES(10,4);
CREATE TABLE test_append_table(id int, col_2 int);
SELECT create_distributed_table('test_append_table','id','append');
create_distributed_table
--------------------------
(1 row)
SELECT master_create_empty_shard('test_append_table');
master_create_empty_shard
---------------------------
1440010
(1 row)
SELECT * FROM master_append_table_to_shard(1440010, 'append_stage_table', 'localhost', :master_port);
master_append_table_to_shard
------------------------------
0.00533333
(1 row)
SELECT master_create_empty_shard('test_append_table') AS new_shard_id;
new_shard_id
--------------
1440011
(1 row)
SELECT * FROM master_append_table_to_shard(1440011, 'append_stage_table_2', 'localhost', :master_port);
master_append_table_to_shard
------------------------------
0.00533333
(1 row)
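-- an aside (assumption, not stated in the diff): the float returned by
-- master_append_table_to_shard is the shard fill ratio, i.e. the shard's size
-- relative to citus.shard_max_size, which callers can use to decide when to
-- create a new shard via master_create_empty_shard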
UPDATE test_append_table SET col_2 = 5;
SELECT * FROM test_append_table;
id | col_2
----+-------
8 | 5
9 | 5
10 | 5
1 | 5
3 | 5
5 | 5
(6 rows)
DROP TABLE append_stage_table;
DROP TABLE append_stage_table_2;
DROP TABLE test_append_table;
-- Multi-shard update of a partitioned distributed table
SET citus.multi_shard_modify_mode to 'parallel';
SET citus.shard_replication_factor to 1;
CREATE TABLE tt1(id int, col_2 int) partition by range (col_2);
ERROR: syntax error at or near "partition"
LINE 1: CREATE TABLE tt1(id int, col_2 int) partition by range (col_...
^
CREATE TABLE tt1_510 partition of tt1 for VALUES FROM (5) to (10);
ERROR: syntax error at or near "partition"
LINE 1: CREATE TABLE tt1_510 partition of tt1 for VALUES FROM (5) to...
^
CREATE TABLE tt1_1120 partition of tt1 for VALUES FROM (11) to (20);
ERROR: syntax error at or near "partition"
LINE 1: CREATE TABLE tt1_1120 partition of tt1 for VALUES FROM (11) ...
^
INSERT INTO tt1 VALUES (1,11), (3,15), (5,17), (6,19), (8,17), (2,12);
ERROR: relation "tt1" does not exist
LINE 1: INSERT INTO tt1 VALUES (1,11), (3,15), (5,17), (6,19), (8,17...
^
SELECT create_distributed_table('tt1','id');
ERROR: relation "tt1" does not exist
LINE 1: SELECT create_distributed_table('tt1','id');
^
UPDATE tt1 SET col_2 = 13;
ERROR: relation "tt1" does not exist
LINE 1: UPDATE tt1 SET col_2 = 13;
^
DELETE FROM tt1 WHERE id = 1 or id = 3 or id = 5;
ERROR: relation "tt1" does not exist
LINE 1: DELETE FROM tt1 WHERE id = 1 or id = 3 or id = 5;
^
SELECT * FROM tt1;
ERROR: relation "tt1" does not exist
LINE 1: SELECT * FROM tt1;
^
-- Partitioned distributed table within transaction
INSERT INTO tt1 VALUES(4,6);
ERROR: relation "tt1" does not exist
LINE 1: INSERT INTO tt1 VALUES(4,6);
^
INSERT INTO tt1 VALUES(7,7);
ERROR: relation "tt1" does not exist
LINE 1: INSERT INTO tt1 VALUES(7,7);
^
INSERT INTO tt1 VALUES(9,8);
ERROR: relation "tt1" does not exist
LINE 1: INSERT INTO tt1 VALUES(9,8);
^
BEGIN;
-- Update rows from partition tt1_1120
UPDATE tt1 SET col_2 = 12 WHERE col_2 > 10 and col_2 < 20;
ERROR: relation "tt1" does not exist
LINE 1: UPDATE tt1 SET col_2 = 12 WHERE col_2 > 10 and col_2 < 20;
^
-- Update rows from partition tt1_510
UPDATE tt1 SET col_2 = 7 WHERE col_2 < 10 and col_2 > 5;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
SELECT * FROM tt1 ORDER BY id;
ERROR: relation "tt1" does not exist
LINE 1: SELECT * FROM tt1 ORDER BY id;
^
-- Modify main table and partition table within same transaction
BEGIN;
UPDATE tt1 SET col_2 = 12 WHERE col_2 > 10 and col_2 < 20;
ERROR: relation "tt1" does not exist
LINE 1: UPDATE tt1 SET col_2 = 12 WHERE col_2 > 10 and col_2 < 20;
^
UPDATE tt1 SET col_2 = 7 WHERE col_2 < 10 and col_2 > 5;
ERROR: current transaction is aborted, commands ignored until end of transaction block
DELETE FROM tt1_510;
ERROR: current transaction is aborted, commands ignored until end of transaction block
DELETE FROM tt1_1120;
ERROR: current transaction is aborted, commands ignored until end of transaction block
COMMIT;
SELECT * FROM tt1 ORDER BY id;
ERROR: relation "tt1" does not exist
LINE 1: SELECT * FROM tt1 ORDER BY id;
^
DROP TABLE tt1;
ERROR: table "tt1" does not exist
-- Update and copy in the same transaction
CREATE TABLE tt2(id int, col_2 int);
SELECT create_distributed_table('tt2','id');
create_distributed_table
--------------------------
(1 row)
BEGIN;
\COPY tt2 FROM STDIN DELIMITER AS ',';
UPDATE tt2 SET col_2 = 1;
COMMIT;
SELECT * FROM tt2 ORDER BY id;
id | col_2
----+-------
1 | 1
2 | 1
3 | 1
7 | 1
9 | 1
(5 rows)
-- Test returning with both types of executors
UPDATE tt2 SET col_2 = 5 RETURNING id, col_2;
id | col_2
----+-------
1 | 5
3 | 5
7 | 5
9 | 5
2 | 5
(5 rows)
SET citus.multi_shard_modify_mode to sequential;
UPDATE tt2 SET col_2 = 3 RETURNING id, col_2;
id | col_2
----+-------
1 | 3
3 | 3
7 | 3
9 | 3
2 | 3
(5 rows)
DROP TABLE tt2;
-- Multiple RTEs are only supported if the subquery is pushdownable
SET citus.multi_shard_modify_mode to DEFAULT;
-- To test colocation between tables in modify queries
SET citus.shard_count to 6;
CREATE TABLE events_test_table_2 (user_id int, value_1 int, value_2 int, value_3 int);
SELECT create_distributed_table('events_test_table_2', 'user_id');
create_distributed_table
--------------------------
(1 row)
\COPY events_test_table_2 FROM STDIN DELIMITER AS ',';
CREATE TABLE events_test_table_local (user_id int, value_1 int, value_2 int, value_3 int);
\COPY events_test_table_local FROM STDIN DELIMITER AS ',';
CREATE TABLE test_table_1(id int, date_col timestamptz, col_3 int);
INSERT INTO test_table_1 VALUES(1, '2014-04-05 08:32:12', 5);
INSERT INTO test_table_1 VALUES(2, '2015-02-01 08:31:16', 7);
INSERT INTO test_table_1 VALUES(3, '2111-01-12 08:35:19', 9);
SELECT create_distributed_table('test_table_1', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
-- We can push down the query if there is partition key equality
UPDATE users_test_table
SET value_2 = 5
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id;
DELETE FROM users_test_table
USING events_test_table
WHERE users_test_table.user_id = events_test_table.user_id;
UPDATE users_test_table
SET value_1 = 3
WHERE user_id IN (SELECT user_id
FROM events_test_table);
DELETE FROM users_test_table
WHERE user_id IN (SELECT user_id
FROM events_test_table);
DELETE FROM events_test_table_2
WHERE now() > (SELECT max(date_col)
FROM test_table_1
WHERE test_table_1.id = events_test_table_2.user_id
GROUP BY id)
RETURNING *;
user_id | value_1 | value_2 | value_3
---------+---------+---------+---------
1 | 5 | 7 | 7
1 | 20 | 12 | 25
1 | 60 | 17 | 17
(3 rows)
UPDATE users_test_table
SET value_1 = 5
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id
AND events_test_table.user_id > 5;
UPDATE users_test_table
SET value_1 = 4
WHERE user_id IN (SELECT user_id
FROM users_test_table
UNION
SELECT user_id
FROM events_test_table);
UPDATE users_test_table
SET value_1 = 4
WHERE user_id IN (SELECT user_id
FROM users_test_table
UNION
SELECT user_id
FROM events_test_table) returning value_3;
value_3
---------
0
0
0
0
0
0
0
0
0
0
0
(11 rows)
UPDATE users_test_table
SET value_1 = 4
WHERE user_id IN (SELECT user_id
FROM users_test_table
UNION ALL
SELECT user_id
FROM events_test_table) returning value_3;
value_3
---------
0
0
0
0
0
0
0
0
0
0
0
(11 rows)
UPDATE users_test_table
SET value_1 = 5
WHERE
value_2 >
(SELECT
max(value_2)
FROM
events_test_table
WHERE
users_test_table.user_id = events_test_table.user_id
GROUP BY
user_id
);
UPDATE users_test_table
SET value_3 = 1
WHERE
value_2 >
(SELECT
max(value_2)
FROM
events_test_table
WHERE
users_test_table.user_id = events_test_table.user_id AND
users_test_table.value_2 > events_test_table.value_2
GROUP BY
user_id
);
UPDATE users_test_table
SET value_2 = 4
WHERE
value_1 > 1 AND value_1 < 3
AND value_2 >= 1
AND user_id IN
(
SELECT
e1.user_id
FROM (
SELECT
user_id,
1 AS view_homepage
FROM events_test_table
WHERE
value_1 IN (0, 1)
) e1 LEFT JOIN LATERAL (
SELECT
user_id,
1 AS use_demo
FROM events_test_table
WHERE
user_id = e1.user_id
) e2 ON true
);
UPDATE users_test_table
SET value_3 = 5
WHERE value_2 IN (SELECT AVG(value_1) OVER (PARTITION BY user_id) FROM events_test_table WHERE events_test_table.user_id = users_test_table.user_id);
-- Test it within transaction
BEGIN;
INSERT INTO users_test_table
SELECT * FROM events_test_table
WHERE events_test_table.user_id = 1 OR events_test_table.user_id = 5;
SELECT SUM(value_2) FROM users_test_table;
sum
-----
169
(1 row)
UPDATE users_test_table
SET value_2 = 1
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id;
SELECT SUM(value_2) FROM users_test_table;
sum
-----
97
(1 row)
COMMIT;
-- Test with schema
CREATE SCHEMA sec_schema;
CREATE TABLE sec_schema.tt1(id int, value_1 int);
SELECT create_distributed_table('sec_schema.tt1','id');
create_distributed_table
--------------------------
(1 row)
INSERT INTO sec_schema.tt1 values(1,1),(2,2),(7,7),(9,9);
UPDATE sec_schema.tt1
SET value_1 = 11
WHERE id < (SELECT max(value_2) FROM events_test_table_2
WHERE sec_schema.tt1.id = events_test_table_2.user_id
GROUP BY user_id)
RETURNING *;
id | value_1
----+---------
7 | 11
9 | 11
(2 rows)
DROP SCHEMA sec_schema CASCADE;
NOTICE: drop cascades to table sec_schema.tt1
-- We don't need partition key equality with reference tables
UPDATE events_test_table
SET value_2 = 5
FROM users_reference_copy_table
WHERE users_reference_copy_table.user_id = events_test_table.value_1;
-- Both reference tables and hash distributed tables can be used in subquery
UPDATE events_test_table as ett
SET value_2 = 6
WHERE ett.value_3 IN (SELECT utt.value_3
FROM users_test_table as utt, users_reference_copy_table as uct
WHERE utt.user_id = uct.user_id AND utt.user_id = ett.user_id);
-- We don't need an equality check with constant values in a sub-select
UPDATE users_reference_copy_table
SET value_2 = 6
WHERE user_id IN (SELECT 2);
UPDATE users_reference_copy_table
SET value_2 = 6
WHERE value_1 IN (SELECT 2);
UPDATE users_test_table
SET value_2 = 6
WHERE user_id IN (SELECT 2);
UPDATE users_test_table
SET value_2 = 6
WHERE value_1 IN (SELECT 2);
-- Function calls in subqueries will be recursively planned
UPDATE test_table_1
SET col_3 = 6
WHERE date_col IN (SELECT now());
-- Test with prepared statements
SELECT COUNT(*) FROM users_test_table WHERE value_1 = 0;
count
-------
0
(1 row)
PREPARE foo_plan_2(int,int) AS UPDATE users_test_table
SET value_1 = $1, value_3 = $2
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id;
EXECUTE foo_plan_2(1,5);
EXECUTE foo_plan_2(3,15);
EXECUTE foo_plan_2(5,25);
EXECUTE foo_plan_2(7,35);
EXECUTE foo_plan_2(9,45);
EXECUTE foo_plan_2(0,0);
SELECT COUNT(*) FROM users_test_table WHERE value_1 = 0;
count
-------
6
(1 row)
-- Test with varying WHERE expressions
UPDATE users_test_table
SET value_1 = 7
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id OR FALSE;
UPDATE users_test_table
SET value_1 = 7
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id AND TRUE;
-- Test with inactive shard placements
-- manually set shardstate of one placement of users_test_table as inactive
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1440000;
UPDATE users_test_table
SET value_2 = 5
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id;
ERROR: cannot find a worker that has active placements for all shards in the query
-- manually set shardstate of one placement of events_test_table as inactive
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1440004;
UPDATE users_test_table
SET value_2 = 5
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id;
ERROR: cannot find a worker that has active placements for all shards in the query
UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid = 1440000;
UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid = 1440004;
-- A subquery must return a single value to use it with comparison operators
UPDATE users_test_table as utt
SET value_1 = 3
WHERE value_2 > (SELECT value_3 FROM events_test_table as ett WHERE utt.user_id = ett.user_id);
ERROR: more than one row returned by a subquery used as an expression
CONTEXT: while executing command on localhost:57637
-- We cannot push down a query if the target relation is a reference table
UPDATE users_reference_copy_table
SET value_2 = 5
FROM events_test_table
WHERE users_reference_copy_table.user_id = events_test_table.user_id;
ERROR: only reference tables may be queried when targeting a reference table with multi shard UPDATE/DELETE queries with multiple tables
-- We cannot push it down if the query has an outer join and USING
UPDATE events_test_table
SET value_2 = users_test_table.user_id
FROM users_test_table
FULL OUTER JOIN events_test_table e2 USING (user_id)
WHERE e2.user_id = events_test_table.user_id RETURNING events_test_table.value_2;
ERROR: a join with USING causes an internal naming conflict, use ON instead
-- Non-pushdownable subqueries, but will be handled through recursive planning
UPDATE users_test_table
SET value_1 = 1
WHERE user_id IN (SELECT Count(value_1)
FROM events_test_table
GROUP BY user_id);
UPDATE users_test_table
SET value_1 = (SELECT Count(*)
FROM events_test_table);
UPDATE users_test_table
SET value_1 = 4
WHERE user_id IN (SELECT user_id
FROM users_test_table
UNION
SELECT value_1
FROM events_test_table);
UPDATE users_test_table
SET value_1 = 4
WHERE user_id IN (SELECT user_id
FROM users_test_table
INTERSECT
SELECT Sum(value_1)
FROM events_test_table
GROUP BY user_id);
UPDATE users_test_table
SET value_2 = (SELECT value_3
FROM users_test_table);
ERROR: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
UPDATE users_test_table
SET value_2 = 2
WHERE
value_2 >
(SELECT
max(value_2)
FROM
events_test_table
WHERE
users_test_table.user_id > events_test_table.user_id AND
users_test_table.value_1 = events_test_table.value_1
GROUP BY
user_id
);
ERROR: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
UPDATE users_test_table
SET (value_1, value_2) = (2,1)
WHERE user_id IN
(SELECT user_id
FROM users_test_table
INTERSECT
SELECT user_id
FROM events_test_table);
-- Reference tables cannot be on the outer part of an outer join
UPDATE users_test_table
SET value_1 = 4
WHERE user_id IN
(SELECT DISTINCT e2.user_id
FROM users_reference_copy_table
LEFT JOIN users_test_table e2 ON (e2.user_id = users_reference_copy_table.value_1)) RETURNING *;
ERROR: cannot pushdown the subquery
DETAIL: There exist a reference table in the outer part of the outer join
-- Volatile functions are also not supported
UPDATE users_test_table
SET value_2 = 5
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id * random();
ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE
UPDATE users_test_table
SET value_2 = 5 * random()
FROM events_test_table
WHERE users_test_table.user_id = events_test_table.user_id;
ERROR: functions used in UPDATE queries on distributed tables must not be VOLATILE
-- The recursive modify planner does not take care of the following test because the query
-- is fully pushdownable, yet it is not allowed because it would lead to inconsistent replicas.
UPDATE users_test_table
SET value_2 = subquery.random FROM (SELECT user_id, random()
FROM events_test_table) subquery
WHERE users_test_table.user_id = subquery.user_id;
ERROR: functions used in UPDATE queries on distributed tables must not be VOLATILE
-- Volatile functions in a subquery are recursively planned
UPDATE users_test_table
SET value_2 = 5
WHERE users_test_table.user_id IN (SELECT user_id * random() FROM events_test_table);
UPDATE users_test_table
SET value_2 = subquery.random FROM (SELECT user_id, random()
FROM events_test_table) subquery;
UPDATE users_test_table
SET value_2 = subquery.random FROM (SELECT user_id, random()
FROM events_test_table OFFSET 0) subquery
WHERE users_test_table.user_id = subquery.user_id;
-- Make the following tests consistent
UPDATE users_test_table SET value_2 = 0;
-- Local tables are not supported
UPDATE users_test_table
SET value_2 = 5
FROM events_test_table_local
WHERE users_test_table.user_id = events_test_table_local.user_id;
ERROR: relation events_test_table_local is not distributed
UPDATE events_test_table_local
SET value_2 = 5
FROM users_test_table
WHERE events_test_table_local.user_id = users_test_table.user_id;
ERROR: relation events_test_table_local is not distributed
-- Local tables in a subquery are supported through recursive planning
UPDATE users_test_table
SET value_2 = 5
WHERE users_test_table.user_id IN(SELECT user_id FROM events_test_table_local);
-- Shard counts of tables must be equal to push down the query
UPDATE users_test_table
SET value_2 = 5
FROM events_test_table_2
WHERE users_test_table.user_id = events_test_table_2.user_id;
ERROR: cannot push down this subquery
DETAIL: Shards of relations in subquery need to have 1-to-1 shard partitioning
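-- an aside (following from the SET citus.shard_count TO 6 above): the pushdown
-- fails because events_test_table_2 was created with six shards while
-- users_test_table has a different shard count, so their shards cannot be
-- matched one-to-one for colocated execution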
-- Should error out due to multiple rows returned from the subquery, but we cannot get this information within
-- the subquery pushdown planner. This query will be sent to the worker via the recursive planner.
DELETE FROM users_test_table
WHERE users_test_table.user_id = (SELECT user_id
FROM events_test_table);
ERROR: more than one row returned by a subquery used as an expression
CONTEXT: while executing command on localhost:57637
-- Cursors are not supported
BEGIN;
DECLARE test_cursor CURSOR FOR SELECT * FROM users_test_table ORDER BY user_id;
FETCH test_cursor;
user_id | value_1 | value_2 | value_3
---------+---------+---------+---------
1 | 2 | 5 | 0
(1 row)
UPDATE users_test_table SET value_2 = 5 WHERE CURRENT OF test_cursor;
ERROR: cannot run DML queries with cursors
ROLLBACK;
-- Stable functions are supported
SELECT * FROM test_table_1;
id | date_col | col_3
----+------------------------------+-------
1 | Sat Apr 05 08:32:12 2014 PDT | 5
3 | Mon Jan 12 08:35:19 2111 PST | 9
2 | Sun Feb 01 08:31:16 2015 PST | 7
(3 rows)
UPDATE test_table_1 SET col_3 = 3 WHERE date_col < now();
SELECT * FROM test_table_1;
id | date_col | col_3
----+------------------------------+-------
1 | Sat Apr 05 08:32:12 2014 PDT | 3
3 | Mon Jan 12 08:35:19 2111 PST | 9
2 | Sun Feb 01 08:31:16 2015 PST | 3
(3 rows)
DELETE FROM test_table_1 WHERE date_col < current_timestamp;
SELECT * FROM test_table_1;
id | date_col | col_3
----+------------------------------+-------
3 | Mon Jan 12 08:35:19 2111 PST | 9
(1 row)
DROP TABLE test_table_1;
-- Volatile functions are not supported
CREATE TABLE test_table_2(id int, double_col double precision);
INSERT INTO test_table_2 VALUES(1, random());
INSERT INTO test_table_2 VALUES(2, random());
INSERT INTO test_table_2 VALUES(3, random());
SELECT create_distributed_table('test_table_2', 'id');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
UPDATE test_table_2 SET double_col = random();
ERROR: functions used in UPDATE queries on distributed tables must not be VOLATILE
DROP TABLE test_table_2;
-- Run multi shard updates and deletes without transaction on reference tables
SELECT COUNT(*) FROM users_reference_copy_table;
count
-------
15
(1 row)
UPDATE users_reference_copy_table SET value_1 = 1;
SELECT SUM(value_1) FROM users_reference_copy_table;
sum
-----
15
(1 row)
SELECT COUNT(*), SUM(value_2) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5;
count | sum
-------+-----
4 | 52
(1 row)
UPDATE users_reference_copy_table SET value_2 = value_2 + 1 WHERE user_id = 3 or user_id = 5;
SELECT COUNT(*), SUM(value_2) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5;
count | sum
-------+-----
4 | 56
(1 row)
UPDATE users_reference_copy_table SET value_3 = 0 WHERE user_id <> 3;
SELECT SUM(value_3) FROM users_reference_copy_table WHERE user_id <> 3;
sum
-----
0
(1 row)
DELETE FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5;
SELECT COUNT(*) FROM users_reference_copy_table WHERE user_id = 3 or user_id = 5;
count
-------
0
(1 row)
-- Do some tests by changing shard replication factor
DROP TABLE users_test_table;
SET citus.shard_replication_factor to 2;
CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int);
SELECT create_distributed_table('users_test_table', 'user_id');
create_distributed_table
--------------------------
(1 row)
\COPY users_test_table FROM STDIN DELIMITER AS ',';
-- Run multi shard updates and deletes without transaction on hash distributed tables
UPDATE users_test_table SET value_1 = 1;
SELECT COUNT(*), SUM(value_1) FROM users_test_table;
count | sum
-------+-----
15 | 15
(1 row)
SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3;
count | sum
-------+-----
4 | 52
(1 row)
UPDATE users_test_table SET value_2 = value_2 + 1 WHERE user_id = 1 or user_id = 3;
SELECT COUNT(*), SUM(value_2) FROM users_test_table WHERE user_id = 1 or user_id = 3;
count | sum
-------+-----
4 | 56
(1 row)
UPDATE users_test_table SET value_3 = 0 WHERE user_id <> 5;
SELECT SUM(value_3) FROM users_test_table WHERE user_id <> 5;
sum
-----
0
(1 row)
SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5;
count
-------
4
(1 row)
DELETE FROM users_test_table WHERE user_id = 3 or user_id = 5;
SELECT COUNT(*) FROM users_test_table WHERE user_id = 3 or user_id = 5;
count
-------
0
(1 row)
DROP TABLE users_test_table;
DROP TABLE events_test_table;
DROP TABLE events_reference_copy_table;
DROP TABLE users_reference_copy_table;


@ -2,17 +2,9 @@
-- MULTI_TASK_ASSIGNMENT
--
SET citus.next_shard_id TO 880000;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
version_above_nine
--------------------
t
(1 row)
-- the function simply parses the results and returns 'shardId@worker'
-- for all the explain task outputs
CREATE OR REPLACE FUNCTION parse_explain_output(in qry text, in table_name text, out r text)
RETURNS SETOF TEXT AS $$
DECLARE
portOfTheTask text;
@ -78,25 +70,25 @@ SET client_min_messages TO DEBUG3;
SET citus.task_assignment_policy TO 'greedy';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
QUERY PLAN
-----------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
QUERY PLAN
-----------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
@ -104,25 +96,25 @@ DEBUG: assigned task 2 to node localhost:57637
SET citus.task_assignment_policy TO 'first-replica';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
QUERY PLAN
-----------------------------------------------------------------------
DEBUG: assigned task 3 to node localhost:57638
QUERY PLAN
----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: Router planner does not support append-partitioned tables.
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
QUERY PLAN
-----------------------------------------------------------------------
DEBUG: assigned task 3 to node localhost:57638
QUERY PLAN
----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
@ -145,7 +137,7 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(2 rows)
@ -155,7 +147,7 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(2 rows)
@ -166,7 +158,7 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(2 rows)
@ -176,26 +168,26 @@ DEBUG: Creating router plan
DEBUG: Plan is router executable
QUERY PLAN
--------------------------------------------------------------
Custom Scan (Citus Router)
Custom Scan (Citus Adaptive)
explain statements for distributed queries are not enabled
(2 rows)
ROLLBACK;
RESET client_min_messages;
-- Now, let's test round-robin policy
-- round-robin policy relies on PostgreSQL's local transactionId,
-- which might change and we don't have any control over it.
-- the important thing that we look for is that round-robin policy
-- should give the same output for executions in the same transaction
-- and different output for executions that are not inside the
-- same transaction. To ensure that, we define a helper function
BEGIN;
SET LOCAL citus.explain_distributed_queries TO on;
CREATE TEMPORARY TABLE explain_outputs (value text);
SET LOCAL citus.task_assignment_policy TO 'round-robin';
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table');
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table');
-- given that we're in the same transaction, the count should be 1
SELECT count(DISTINCT value) FROM explain_outputs;
@ -211,9 +203,9 @@ COMMIT;
-- change on every execution
SET citus.task_assignment_policy TO 'round-robin';
SET citus.explain_distributed_queries TO ON;
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table');
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table');
-- given that we're in the same transaction, the count should be 2
-- since there are two different worker nodes
@ -226,7 +218,7 @@ SELECT count(DISTINCT value) FROM explain_outputs;
TRUNCATE explain_outputs;
-- same test with a distributed table
-- we keep this test because as of this commit, the code
-- paths for reference tables and distributed tables are
-- not the same
SET citus.shard_replication_factor TO 2;
CREATE TABLE task_assignment_replicated_hash (test_id integer);
@ -239,9 +231,9 @@ SELECT create_distributed_table('task_assignment_replicated_hash', 'test_id');
BEGIN;
SET LOCAL citus.explain_distributed_queries TO on;
SET LOCAL citus.task_assignment_policy TO 'round-robin';
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash');
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash');
-- given that we're in the same transaction, the count should be 1
SELECT count(DISTINCT value) FROM explain_outputs;
@ -257,9 +249,9 @@ COMMIT;
-- change on every execution
SET citus.task_assignment_policy TO 'round-robin';
SET citus.explain_distributed_queries TO ON;
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash');
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash');
-- given that we're in the same transaction, the count should be 2
-- since there are two different worker nodes


@ -1,12 +1,4 @@
SET citus.next_shard_id TO 990000;
-- print server version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 as version_above_ten;
version_above_ten
-------------------
t
(1 row)
-- ===================================================================
-- test utility statement functionality
-- ===================================================================


@ -1,466 +0,0 @@
SET citus.next_shard_id TO 990000;
-- print server version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 as version_above_ten;
version_above_ten
-------------------
f
(1 row)
-- ===================================================================
-- test utility statement functionality
-- ===================================================================
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
CREATE TABLE sharded_table ( name text, id bigint );
SELECT create_distributed_table('sharded_table', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)
-- COPY out is supported with distributed tables
COPY sharded_table TO STDOUT;
COPY (SELECT COUNT(*) FROM sharded_table) TO STDOUT;
0
BEGIN;
SET TRANSACTION READ ONLY;
COPY sharded_table TO STDOUT;
COPY (SELECT COUNT(*) FROM sharded_table) TO STDOUT;
0
COMMIT;
-- ANALYZE is supported in a transaction block
BEGIN;
ANALYZE sharded_table;
ANALYZE sharded_table;
END;
-- cursors may not involve distributed tables
DECLARE all_sharded_rows CURSOR FOR SELECT * FROM sharded_table;
ERROR: DECLARE CURSOR can only be used in transaction blocks
-- verify PREPARE functionality
PREPARE sharded_insert AS INSERT INTO sharded_table VALUES ('adam', 1);
PREPARE sharded_update AS UPDATE sharded_table SET name = 'bob' WHERE id = 1;
PREPARE sharded_delete AS DELETE FROM sharded_table WHERE id = 1;
PREPARE sharded_query AS SELECT name FROM sharded_table WHERE id = 1;
EXECUTE sharded_query;
name
------
(0 rows)
EXECUTE sharded_insert;
EXECUTE sharded_query;
name
------
adam
(1 row)
EXECUTE sharded_update;
EXECUTE sharded_query;
name
------
bob
(1 row)
EXECUTE sharded_delete;
EXECUTE sharded_query;
name
------
(0 rows)
-- try to drop shards with where clause
SELECT master_apply_delete_command('DELETE FROM sharded_table WHERE id > 0');
ERROR: cannot delete from hash distributed table with this command
DETAIL: Delete statements on hash-partitioned tables are not supported with master_apply_delete_command.
HINT: Use the DELETE command instead.
-- drop all shards
SELECT master_apply_delete_command('DELETE FROM sharded_table');
ERROR: cannot delete from hash distributed table with this command
DETAIL: Delete statements on hash-partitioned tables are not supported with master_apply_delete_command.
HINT: Use the DELETE command instead.
-- lock shard metadata: take some share locks and exclusive locks
BEGIN;
SELECT lock_shard_metadata(5, ARRAY[999001, 999002, 999002]);
lock_shard_metadata
---------------------
(1 row)
SELECT lock_shard_metadata(7, ARRAY[999001, 999003, 999004]);
lock_shard_metadata
---------------------
(1 row)
SELECT locktype, objid, mode, granted
FROM pg_locks
WHERE objid IN (999001, 999002, 999003, 999004)
ORDER BY objid, mode;
locktype | objid | mode | granted
----------+--------+---------------+---------
advisory | 999001 | ExclusiveLock | t
advisory | 999001 | ShareLock | t
advisory | 999002 | ShareLock | t
advisory | 999003 | ExclusiveLock | t
advisory | 999004 | ExclusiveLock | t
(5 rows)
END;
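-- an aside (inferred from the pg_locks output above): the first argument of
-- lock_shard_metadata is a PostgreSQL lock level, e.g. 5 => ShareLock and
-- 7 => ExclusiveLock, matching the modes reported for the advisory locks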
-- lock shard metadata: unsupported lock type
SELECT lock_shard_metadata(0, ARRAY[990001, 999002]);
ERROR: unsupported lockmode 0
-- lock shard metadata: invalid shard ID
SELECT lock_shard_metadata(5, ARRAY[0]);
lock_shard_metadata
---------------------
(1 row)
-- lock shard metadata: lock nothing
SELECT lock_shard_metadata(5, ARRAY[]::bigint[]);
ERROR: no locks specified
-- lock shard resources: take some share locks and exclusive locks
BEGIN;
SELECT lock_shard_resources(5, ARRAY[999001, 999002, 999002]);
lock_shard_resources
----------------------
(1 row)
SELECT lock_shard_resources(7, ARRAY[999001, 999003, 999004]);
lock_shard_resources
----------------------
(1 row)
SELECT locktype, objid, mode, granted
FROM pg_locks
WHERE objid IN (999001, 999002, 999003, 999004)
ORDER BY objid, mode;
locktype | objid | mode | granted
----------+--------+---------------+---------
advisory | 999001 | ExclusiveLock | t
advisory | 999001 | ShareLock | t
advisory | 999002 | ShareLock | t
advisory | 999003 | ExclusiveLock | t
advisory | 999004 | ExclusiveLock | t
(5 rows)
END;
-- lock shard resources: unsupported lock type
SELECT lock_shard_resources(0, ARRAY[990001, 999002]);
ERROR: unsupported lockmode 0
-- lock shard resources: invalid shard ID
SELECT lock_shard_resources(5, ARRAY[-1]);
lock_shard_resources
----------------------
(1 row)
-- lock shard resources: lock nothing
SELECT lock_shard_resources(5, ARRAY[]::bigint[]);
ERROR: no locks specified
-- drop table
DROP TABLE sharded_table;
-- VACUUM tests
-- create a table with a single shard (for convenience)
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
CREATE TABLE dustbunnies (id integer, name text, age integer);
SELECT create_distributed_table('dustbunnies', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)
-- add some data to the distributed table
\copy dustbunnies (id, name) from stdin with csv
CREATE TABLE second_dustbunnies(id integer, name text, age integer);
SELECT master_create_distributed_table('second_dustbunnies', 'id', 'hash');
master_create_distributed_table
---------------------------------
(1 row)
SELECT master_create_worker_shards('second_dustbunnies', 1, 2);
master_create_worker_shards
-----------------------------
(1 row)
-- the following approach is adapted from PostgreSQL's stats.sql file
-- save relevant stat counter values in refreshable view
\c - - - :worker_1_port
CREATE MATERIALIZED VIEW prevcounts AS
SELECT analyze_count, vacuum_count FROM pg_stat_user_tables
WHERE relname='dustbunnies_990002';
-- create function that sleeps until those counters increment
create function wait_for_stats() returns void as $$
declare
start_time timestamptz := clock_timestamp();
analyze_updated bool;
vacuum_updated bool;
begin
-- we don't want to wait forever; loop will exit after 10 seconds
for i in 1 .. 100 loop
-- check to see if analyze has been updated
SELECT (st.analyze_count >= pc.analyze_count + 1) INTO analyze_updated
FROM pg_stat_user_tables AS st, pg_class AS cl, prevcounts AS pc
WHERE st.relname='dustbunnies_990002' AND cl.relname='dustbunnies_990002';
-- check to see if vacuum has been updated
SELECT (st.vacuum_count >= pc.vacuum_count + 1) INTO vacuum_updated
FROM pg_stat_user_tables AS st, pg_class AS cl, prevcounts AS pc
WHERE st.relname='dustbunnies_990002' AND cl.relname='dustbunnies_990002';
exit when analyze_updated or vacuum_updated;
-- wait a little
perform pg_sleep(0.1);
-- reset stats snapshot so we can test again
perform pg_stat_clear_snapshot();
end loop;
-- report time waited in postmaster log (where it won't change test output)
raise log 'wait_for_stats delayed % seconds',
extract(epoch from clock_timestamp() - start_time);
end
$$ language plpgsql;
\c - - - :worker_2_port
CREATE MATERIALIZED VIEW prevcounts AS
SELECT analyze_count, vacuum_count FROM pg_stat_user_tables
WHERE relname='dustbunnies_990001';
-- create function that sleeps until those counters increment
create function wait_for_stats() returns void as $$
declare
start_time timestamptz := clock_timestamp();
analyze_updated bool;
vacuum_updated bool;
begin
-- we don't want to wait forever; loop will exit after 10 seconds
for i in 1 .. 100 loop
-- check to see if analyze has been updated
SELECT (st.analyze_count >= pc.analyze_count + 1) INTO analyze_updated
FROM pg_stat_user_tables AS st, pg_class AS cl, prevcounts AS pc
WHERE st.relname='dustbunnies_990001' AND cl.relname='dustbunnies_990001';
-- check to see if vacuum has been updated
SELECT (st.vacuum_count >= pc.vacuum_count + 1) INTO vacuum_updated
FROM pg_stat_user_tables AS st, pg_class AS cl, prevcounts AS pc
WHERE st.relname='dustbunnies_990001' AND cl.relname='dustbunnies_990001';
exit when analyze_updated or vacuum_updated;
-- wait a little
perform pg_sleep(0.1);
-- reset stats snapshot so we can test again
perform pg_stat_clear_snapshot();
end loop;
-- report time waited in postmaster log (where it won't change test output)
raise log 'wait_for_stats delayed % seconds',
extract(epoch from clock_timestamp() - start_time);
end
$$ language plpgsql;
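-- vacuum and analyze counters are updated asynchronously by the statistics
-- collector, hence the polling loop with pg_stat_clear_snapshot() above rather
-- than a single read of pg_stat_user_tables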
-- run VACUUM and ANALYZE against the table on the master
\c - - - :master_port
VACUUM dustbunnies;
ANALYZE dustbunnies;
-- verify that the VACUUM and ANALYZE ran
\c - - - :worker_1_port
SELECT wait_for_stats();
wait_for_stats
----------------
(1 row)
REFRESH MATERIALIZED VIEW prevcounts;
SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass);
pg_stat_get_vacuum_count
--------------------------
1
(1 row)
SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass);
pg_stat_get_analyze_count
---------------------------
1
(1 row)
-- get file node to verify VACUUM FULL
SELECT relfilenode AS oldnode FROM pg_class WHERE oid='dustbunnies_990002'::regclass
\gset
-- send a VACUUM FULL and a VACUUM ANALYZE
\c - - - :master_port
VACUUM (FULL) dustbunnies;
VACUUM ANALYZE dustbunnies;
-- verify that relfilenode changed
\c - - - :worker_1_port
SELECT relfilenode != :oldnode AS table_rewritten FROM pg_class
WHERE oid='dustbunnies_990002'::regclass;
table_rewritten
-----------------
t
(1 row)
-- verify the VACUUM ANALYZE incremented both vacuum and analyze counts
SELECT wait_for_stats();
wait_for_stats
----------------
(1 row)
SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass);
pg_stat_get_vacuum_count
--------------------------
2
(1 row)
SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass);
pg_stat_get_analyze_count
---------------------------
2
(1 row)
-- disable auto-VACUUM for next test
ALTER TABLE dustbunnies_990002 SET (autovacuum_enabled = false);
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid='dustbunnies_990002'::regclass
\gset
-- send a VACUUM FREEZE after adding a new row
\c - - - :master_port
INSERT INTO dustbunnies VALUES (5, 'peter');
VACUUM (FREEZE) dustbunnies;
-- verify that relfrozenxid increased
\c - - - :worker_1_port
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid='dustbunnies_990002'::regclass;
frozen_performed
------------------
t
(1 row)
-- check that id and name contain no nulls (age was never populated, so it is all nulls)
SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
attname | null_frac
---------+-----------
age | 1
id | 0
name | 0
(3 rows)
-- add NULL values, then perform column-specific ANALYZE
\c - - - :master_port
INSERT INTO dustbunnies VALUES (6, NULL, NULL);
ANALYZE dustbunnies (name);
-- verify that name's NULL ratio is updated but age's is not
\c - - - :worker_1_port
SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
attname | null_frac
---------+-----------
age | 1
id | 0
name | 0.166667
(3 rows)
\c - - - :master_port
-- verify warning for unqualified VACUUM
VACUUM;
WARNING: not propagating VACUUM command to worker nodes
HINT: Provide a specific table in order to VACUUM distributed tables.
-- check for multiple table vacuum
VACUUM dustbunnies, second_dustbunnies;
ERROR: syntax error at or near ","
LINE 1: VACUUM dustbunnies, second_dustbunnies;
^
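-- multi-table VACUUM was added in PostgreSQL 11, so on 11+ the command above
-- parses; this pre-11 expected output captures the syntax error instead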
-- check the current number of vacuum and analyze runs on dustbunnies
SELECT run_command_on_workers($$SELECT wait_for_stats()$$);
run_command_on_workers
------------------------
(localhost,57637,t,"")
(localhost,57638,t,"")
(2 rows)
SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$);
run_command_on_workers
------------------------
(localhost,57637,t,3)
(localhost,57638,t,3)
(2 rows)
SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$);
run_command_on_workers
------------------------
(localhost,57637,t,3)
(localhost,57638,t,3)
(2 rows)
-- and warning when using targeted VACUUM without DDL propagation
SET citus.enable_ddl_propagation to false;
VACUUM dustbunnies;
WARNING: not propagating VACUUM command to worker nodes
HINT: Set citus.enable_ddl_propagation to true in order to send targeted VACUUM commands to worker nodes.
ANALYZE dustbunnies;
WARNING: not propagating ANALYZE command to worker nodes
HINT: Set citus.enable_ddl_propagation to true in order to send targeted ANALYZE commands to worker nodes.
SET citus.enable_ddl_propagation to DEFAULT;
-- the vacuum and analyze should not have propagated
SELECT run_command_on_workers($$SELECT wait_for_stats()$$);
run_command_on_workers
------------------------
(localhost,57637,t,"")
(localhost,57638,t,"")
(2 rows)
SELECT run_command_on_workers($$SELECT pg_stat_get_vacuum_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$);
run_command_on_workers
------------------------
(localhost,57637,t,3)
(localhost,57638,t,3)
(2 rows)
SELECT run_command_on_workers($$SELECT pg_stat_get_analyze_count(tablename::regclass) from pg_tables where tablename LIKE 'dustbunnies_%' limit 1$$);
run_command_on_workers
------------------------
(localhost,57637,t,3)
(localhost,57638,t,3)
(2 rows)
-- test worker_hash
SELECT worker_hash(123);
worker_hash
-------------
-205084363
(1 row)
SELECT worker_hash('1997-08-08'::date);
worker_hash
-------------
-499701663
(1 row)
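-- worker_hash applies the type's default hash function, the same hashing
-- Citus uses to place rows in hash-distributed shards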
-- test a custom type (this test should run after multi_data_types)
SELECT worker_hash('(1, 2)');
ERROR: cannot find a hash function for the input type
HINT: Cast input to a data type with a hash function.
SELECT worker_hash('(1, 2)'::test_composite_type);
worker_hash
-------------
-1895345704
(1 row)
SELECT citus_truncate_trigger();
ERROR: must be called as trigger
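-- citus_truncate_trigger is only valid as a trigger function; a rough sketch
-- of how such a trigger is attached (the trigger name is illustrative, not
-- the exact internals):
-- CREATE TRIGGER truncate_trigger AFTER TRUNCATE ON dustbunnies
--   FOR EACH STATEMENT EXECUTE PROCEDURE citus_truncate_trigger();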
-- confirm that citus_create_restore_point works
SELECT 1 FROM citus_create_restore_point('regression-test');
?column?
----------
1
(1 row)

View File

@ -5,14 +5,6 @@
-- Citus features: simple selects, aggregates, joins, outer joins
-- router queries, single row inserts, multi row inserts via insert
-- into select, multi row insert via copy commands.
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
(1 row)
SELECT count(*) FROM lineitem_hash_part;
count
-------
@ -323,10 +315,10 @@ SELECT * FROM lineitems_by_shipping_method ORDER BY 1,2 LIMIT 5;
-- create a view with group by on partition column
CREATE VIEW lineitems_by_orderkey AS
SELECT
l_orderkey, count(*)
FROM
lineitem_hash_part
SELECT
l_orderkey, count(*)
FROM
lineitem_hash_part
GROUP BY 1;
-- this should work since we're able to push down this query
SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10;
@ -363,7 +355,7 @@ DROP VIEW priority_orders;
CREATE VIEW recent_users AS
SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
HAVING max(time) > '2017-11-23 16:20:33.264457'::timestamp order by 2 DESC;
HAVING max(time) > '2017-11-23 16:20:33.264457'::timestamp order by 2 DESC;
SELECT * FROM recent_users ORDER BY 2 DESC, 1 DESC;
user_id | lastseen
---------+---------------------------------
@ -390,8 +382,8 @@ SELECT count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.us
(1 row)
-- count the number of events per recent user, ordered by count
SELECT ru.user_id, count(*)
FROM recent_users ru
SELECT ru.user_id, count(*)
FROM recent_users ru
JOIN events_table et
ON (ru.user_id = et.user_id)
GROUP BY ru.user_id
@ -404,8 +396,8 @@ SELECT ru.user_id, count(*)
(3 rows)
-- the same query with a left join, however, still generates the same result
SELECT ru.user_id, count(*)
FROM recent_users ru
SELECT ru.user_id, count(*)
FROM recent_users ru
LEFT JOIN events_table et
ON (ru.user_id = et.user_id)
GROUP BY ru.user_id
@ -419,8 +411,8 @@ SELECT ru.user_id, count(*)
-- the query wrapped inside a subquery needs another top-level ORDER BY
SELECT * FROM
(SELECT ru.user_id, count(*)
FROM recent_users ru
(SELECT ru.user_id, count(*)
FROM recent_users ru
JOIN events_table et
ON (ru.user_id = et.user_id)
GROUP BY ru.user_id
@ -436,8 +428,8 @@ ORDER BY 2 DESC, 1;
-- non-partition key joins are supported inside subquery
-- via pull-push execution
SELECT * FROM
(SELECT ru.user_id, count(*)
FROM recent_users ru
(SELECT ru.user_id, count(*)
FROM recent_users ru
JOIN events_table et
ON (ru.user_id = et.event_type)
GROUP BY ru.user_id
@ -463,7 +455,7 @@ SELECT ru.user_id FROM recent_users ru JOIN recent_events re USING(user_id) GROU
-- rows of recent_events that were not generated by recent users
SELECT count(*) FROM (
SELECT re.*, ru.user_id AS recent_user
FROM recent_events re LEFT JOIN recent_users ru USING(user_id)) reu
FROM recent_events re LEFT JOIN recent_users ru USING(user_id)) reu
WHERE recent_user IS NULL;
count
-------
@ -576,7 +568,7 @@ SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM re
(1 row)
-- union between views is supported through recursive planning
(SELECT user_id FROM recent_users)
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users)
ORDER BY 1;
@ -593,7 +585,7 @@ ORDER BY 1;
-- wrapping it inside a SELECT * works
SELECT *
FROM (
(SELECT user_id FROM recent_users)
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0
@ -606,7 +598,7 @@ SELECT *
-- union all also works for views
SELECT *
FROM (
(SELECT user_id FROM recent_users)
(SELECT user_id FROM recent_users)
UNION ALL
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0
@ -619,7 +611,7 @@ SELECT *
SELECT count(*)
FROM (
(SELECT user_id FROM recent_users)
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0;
@ -631,7 +623,7 @@ SELECT count(*)
-- UNION ALL between views is supported through recursive planning
SELECT count(*)
FROM (
(SELECT user_id FROM recent_users)
(SELECT user_id FROM recent_users)
UNION ALL
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0;
@ -646,7 +638,7 @@ SELECT count(*)
(SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
HAVING max(time) > '2017-11-22 05:45:49.978738'::timestamp order by 2 DESC) aa
)
)
UNION
(SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u
WHERE user_id < 2 AND user_id > 0;
@ -660,7 +652,7 @@ SELECT count(*)
(SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
HAVING max(time) > '2017-11-22 05:45:49.978738'::timestamp order by 2 DESC) aa
)
)
UNION ALL
(SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u
WHERE user_id < 2 AND user_id > 0;
@ -815,7 +807,7 @@ EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER
Sort Key: remote_scan.user_id
-> HashAggregate
Group Key: remote_scan.user_id
-> Custom Scan (Citus Real-Time)
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
@ -836,7 +828,7 @@ EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER
EXPLAIN (COSTS FALSE) SELECT *
FROM (
(SELECT user_id FROM recent_users)
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 4 AND user_id > 1
@ -845,7 +837,7 @@ EXPLAIN (COSTS FALSE) SELECT *
-------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: remote_scan.user_id
-> Custom Scan (Citus Real-Time)
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
@ -874,14 +866,14 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN
Limit
-> Sort
Sort Key: remote_scan."time" DESC
-> Custom Scan (Citus Real-Time)
-> Custom Scan (Citus Adaptive)
-> Distributed Subplan 98_1
-> Limit
-> Sort
Sort Key: max((max(remote_scan.lastseen))) DESC
-> HashAggregate
Group Key: remote_scan.user_id
-> Custom Scan (Citus Real-Time)
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task
@ -913,7 +905,7 @@ EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USIN
Limit
-> Sort
Sort Key: remote_scan."time" DESC
-> Custom Scan (Citus Real-Time)
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
-> Task

View File

@ -1,949 +0,0 @@
--
-- MULTI_VIEW
--
-- This file contains test cases for view support. It verifies various
-- Citus features: simple selects, aggregates, joins, outer joins
-- router queries, single row inserts, multi row inserts via insert
-- into select, multi row insert via copy commands.
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
f
(1 row)
SELECT count(*) FROM lineitem_hash_part;
count
-------
12000
(1 row)
SELECT count(*) FROM orders_hash_part;
count
-------
2985
(1 row)
-- create a view for priority orders
CREATE VIEW priority_orders AS SELECT * FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM';
-- aggregate pushdown
SELECT o_orderpriority, count(*) FROM priority_orders GROUP BY 1 ORDER BY 2, 1;
o_orderpriority | count
-----------------+-------
2-HIGH | 593
1-URGENT | 604
(2 rows)
SELECT o_orderpriority, count(*) FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM' GROUP BY 1 ORDER BY 2,1;
o_orderpriority | count
-----------------+-------
2-HIGH | 593
1-URGENT | 604
(2 rows)
-- filters
SELECT o_orderpriority, count(*) as all, count(*) FILTER (WHERE o_orderstatus ='F') as fulfilled FROM priority_orders GROUP BY 1 ORDER BY 2, 1;
 o_orderpriority | all | fulfilled
-----------------+-----+------------
2-HIGH | 593 | 271
1-URGENT | 604 | 280
(2 rows)
-- having
SELECT o_orderdate, count(*) from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc;
o_orderdate | count
-------------+-------
08-20-1996 | 5
10-10-1994 | 4
05-05-1994 | 4
04-07-1994 | 4
03-17-1993 | 4
(5 rows)
-- having with filters
SELECT o_orderdate, count(*) as all, count(*) FILTER(WHERE o_orderstatus = 'F') from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc;
o_orderdate | all | count
-------------+-----+-------
08-20-1996 | 5 | 0
10-10-1994 | 4 | 4
05-05-1994 | 4 | 4
04-07-1994 | 4 | 4
03-17-1993 | 4 | 4
(5 rows)
-- limit
SELECT o_orderkey, o_totalprice from orders_hash_part order by 2 desc, 1 asc limit 5 ;
o_orderkey | o_totalprice
------------+--------------
4421 | 401055.62
10209 | 400191.77
11142 | 395039.05
14179 | 384265.43
11296 | 378166.33
(5 rows)
SELECT o_orderkey, o_totalprice from priority_orders order by 2 desc, 1 asc limit 1 ;
o_orderkey | o_totalprice
------------+--------------
14179 | 384265.43
(1 row)
CREATE VIEW priority_lineitem AS SELECT li.* FROM lineitem_hash_part li JOIN priority_orders ON (l_orderkey = o_orderkey);
SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 LIMIT 5;
l_orderkey | count
------------+-------
7 | 7
225 | 7
226 | 7
322 | 7
326 | 7
(5 rows)
CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR';
-- join between view and table
SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey = l_orderkey);
count
-------
1706
(1 row)
-- join between views
SELECT count(*) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey);
count
-------
700
(1 row)
-- count distinct on partition column is supported
SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey);
count
-------
551
(1 row)
-- count distinct on non-partition column is supported
SELECT count(distinct o_orderpriority) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey);
count
-------
2
(1 row)
-- count distinct on partition column is supported on router queries
SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems
ON (o_orderkey = l_orderkey)
WHERE (o_orderkey = 231);
count
-------
1
(1 row)
-- select distinct on router joins of views also works
SELECT distinct(o_orderkey) FROM priority_orders join air_shipped_lineitems
ON (o_orderkey = l_orderkey)
WHERE (o_orderkey = 231);
o_orderkey
------------
231
(1 row)
-- left join support depends on flattening of the query
SELECT o_orderkey, l_orderkey FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey) ORDER BY o_orderkey LIMIT 1;
o_orderkey | l_orderkey
------------+------------
2 |
(1 row)
-- however, this works
SELECT count(*) FROM priority_orders left join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR';
count
-------
700
(1 row)
-- view on the inner side is supported
SELECT count(*) FROM priority_orders right join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR';
count
-------
1706
(1 row)
-- view on the outer side is supported
SELECT count(*) FROM lineitem_hash_part right join priority_orders ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR';
count
-------
700
(1 row)
-- left join on router query is supported
SELECT o_orderkey, l_linenumber FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey)
WHERE o_orderkey = 2;
o_orderkey | l_linenumber
------------+--------------
2 |
(1 row)
-- repartition query on view join
-- it is executed by recursively planning the view, as the DEBUG output shows
SET client_min_messages TO DEBUG1;
SELECT * FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey) ORDER BY o_orderkey DESC, o_custkey DESC, o_orderpriority DESC LIMIT 5;
DEBUG: generating subplan 22_1 for subquery SELECT lineitem_hash_part.l_orderkey, lineitem_hash_part.l_partkey, lineitem_hash_part.l_suppkey, lineitem_hash_part.l_linenumber, lineitem_hash_part.l_quantity, lineitem_hash_part.l_extendedprice, lineitem_hash_part.l_discount, lineitem_hash_part.l_tax, lineitem_hash_part.l_returnflag, lineitem_hash_part.l_linestatus, lineitem_hash_part.l_shipdate, lineitem_hash_part.l_commitdate, lineitem_hash_part.l_receiptdate, lineitem_hash_part.l_shipinstruct, lineitem_hash_part.l_shipmode, lineitem_hash_part.l_comment FROM public.lineitem_hash_part WHERE (lineitem_hash_part.l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar)
DEBUG: Plan 22 query after replacing subqueries and CTEs: SELECT priority_orders.o_orderkey, priority_orders.o_custkey, priority_orders.o_orderstatus, priority_orders.o_totalprice, priority_orders.o_orderdate, priority_orders.o_orderpriority, priority_orders.o_clerk, priority_orders.o_shippriority, priority_orders.o_comment, air_shipped_lineitems.l_orderkey, air_shipped_lineitems.l_partkey, air_shipped_lineitems.l_suppkey, air_shipped_lineitems.l_linenumber, air_shipped_lineitems.l_quantity, air_shipped_lineitems.l_extendedprice, air_shipped_lineitems.l_discount, air_shipped_lineitems.l_tax, air_shipped_lineitems.l_returnflag, air_shipped_lineitems.l_linestatus, air_shipped_lineitems.l_shipdate, air_shipped_lineitems.l_commitdate, air_shipped_lineitems.l_receiptdate, air_shipped_lineitems.l_shipinstruct, air_shipped_lineitems.l_shipmode, air_shipped_lineitems.l_comment FROM ((SELECT orders_hash_part.o_orderkey, orders_hash_part.o_custkey, orders_hash_part.o_orderstatus, orders_hash_part.o_totalprice, orders_hash_part.o_orderdate, orders_hash_part.o_orderpriority, orders_hash_part.o_clerk, orders_hash_part.o_shippriority, orders_hash_part.o_comment FROM public.orders_hash_part WHERE (orders_hash_part.o_orderpriority OPERATOR(pg_catalog.<) '3-MEDIUM'::bpchar)) priority_orders JOIN (SELECT intermediate_result.l_orderkey, intermediate_result.l_partkey, intermediate_result.l_suppkey, intermediate_result.l_linenumber, intermediate_result.l_quantity, intermediate_result.l_extendedprice, intermediate_result.l_discount, intermediate_result.l_tax, intermediate_result.l_returnflag, intermediate_result.l_linestatus, intermediate_result.l_shipdate, intermediate_result.l_commitdate, intermediate_result.l_receiptdate, intermediate_result.l_shipinstruct, intermediate_result.l_shipmode, intermediate_result.l_comment FROM read_intermediate_result('22_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint, l_partkey integer, l_suppkey integer, l_linenumber integer, l_quantity numeric(15,2), l_extendedprice numeric(15,2), l_discount numeric(15,2), l_tax numeric(15,2), l_returnflag character(1), l_linestatus character(1), l_shipdate date, l_commitdate date, l_receiptdate date, l_shipinstruct character(25), l_shipmode character(10), l_comment character varying(44))) air_shipped_lineitems ON ((priority_orders.o_custkey OPERATOR(pg_catalog.=) air_shipped_lineitems.l_suppkey))) ORDER BY priority_orders.o_orderkey DESC, priority_orders.o_custkey DESC, priority_orders.o_orderpriority DESC LIMIT 5
DEBUG: push down of limit count: 5
o_orderkey | o_custkey | o_orderstatus | o_totalprice | o_orderdate | o_orderpriority | o_clerk | o_shippriority | o_comment | l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment
------------+-----------+---------------+--------------+-------------+-----------------+-----------------+----------------+-------------------------------------------------------+------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+-------------------------------------------
14821 | 1435 | O | 322002.95 | 06-12-1998 | 2-HIGH | Clerk#000000630 | 0 | n packages are furiously ironic ideas. d | 1607 | 118923 | 1435 | 2 | 37.00 | 71851.04 | 0.05 | 0.02 | N | O | 02-27-1996 | 02-18-1996 | 03-16-1996 | NONE | AIR | alongside
14790 | 613 | O | 270163.54 | 08-21-1996 | 2-HIGH | Clerk#000000347 | 0 | p. regular deposits wake. final n | 2629 | 123076 | 613 | 2 | 31.00 | 34071.17 | 0.08 | 0.03 | N | O | 05-24-1998 | 05-26-1998 | 06-10-1998 | COLLECT COD | AIR | ate blithely bold, regular deposits. bold
14758 | 1225 | F | 37812.49 | 10-27-1993 | 2-HIGH | Clerk#000000687 | 0 | ages nag about the furio | 9156 | 176190 | 1225 | 2 | 22.00 | 27856.18 | 0.03 | 0.00 | R | F | 02-08-1994 | 04-01-1994 | 02-24-1994 | DELIVER IN PERSON | AIR | equests dete
14725 | 569 | O | 261801.45 | 06-17-1995 | 2-HIGH | Clerk#000000177 | 0 | ng asymptotes. final, ironic accounts cajole after | 14688 | 173017 | 569 | 3 | 10.00 | 10900.10 | 0.02 | 0.08 | N | O | 03-14-1997 | 04-22-1997 | 04-05-1997 | COLLECT COD | AIR | riously even packages sleep a
14657 | 370 | F | 116160.53 | 02-28-1994 | 1-URGENT | Clerk#000000756 | 0 | ly across the ironic, ironic instructions. bold ideas | 5153 | 67863 | 370 | 3 | 30.00 | 54925.80 | 0.09 | 0.01 | N | O | 11-10-1995 | 11-14-1995 | 11-16-1995 | DELIVER IN PERSON | AIR | beans sleep bl
(5 rows)
RESET client_min_messages;
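-- the DEBUG lines show recursive planning: the view is materialized as
-- intermediate result 22_1 and re-read via read_intermediate_result() on the
-- workers before the non-colocated join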
SELECT count(*) FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey);
count
-------
192
(1 row)
-- materialized views work
-- insert into... select works with views
CREATE TABLE temp_lineitem(LIKE lineitem_hash_part);
SELECT create_distributed_table('temp_lineitem', 'l_orderkey', 'hash', 'lineitem_hash_part');
create_distributed_table
--------------------------
(1 row)
INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems;
SELECT count(*) FROM temp_lineitem;
count
-------
1706
(1 row)
-- the following is effectively a WHERE false query; it should not insert anything
INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems WHERE l_shipmode = 'MAIL';
SELECT count(*) FROM temp_lineitem;
count
-------
1706
(1 row)
-- can create and query materialized views
CREATE MATERIALIZED VIEW mode_counts
AS SELECT l_shipmode, count(*) FROM temp_lineitem GROUP BY l_shipmode;
SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10;
l_shipmode | count
------------+-------
AIR | 1706
(1 row)
-- materialized views are local, cannot join with distributed tables
SELECT count(*) FROM mode_counts JOIN temp_lineitem USING (l_shipmode);
ERROR: relation mode_counts is not distributed
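-- the materialized view lives only on the coordinator, so there are no worker
-- shards to join it against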
-- new data is not immediately reflected in the view
INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems;
SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10;
l_shipmode | count
------------+-------
AIR | 1706
(1 row)
-- refresh updates the materialized view with new data
REFRESH MATERIALIZED VIEW mode_counts;
SELECT * FROM mode_counts WHERE l_shipmode = 'AIR' ORDER BY 2 DESC, 1 LIMIT 10;
l_shipmode | count
------------+-------
AIR | 3412
(1 row)
DROP MATERIALIZED VIEW mode_counts;
SET citus.task_executor_type to "task-tracker";
-- single view repartition subqueries are not supported
SELECT l_suppkey, count(*) FROM
(SELECT l_suppkey, l_shipdate, count(*)
FROM air_shipped_lineitems GROUP BY l_suppkey, l_shipdate) supps
GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries without group by clause are not supported yet
-- the logically equivalent query without a view works fine
SELECT l_suppkey, count(*) FROM
(SELECT l_suppkey, l_shipdate, count(*)
FROM lineitem_hash_part WHERE l_shipmode = 'AIR' GROUP BY l_suppkey, l_shipdate) supps
GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5;
l_suppkey | count
-----------+-------
7680 | 4
160 | 3
1042 | 3
1318 | 3
5873 | 3
(5 rows)
-- when the view is replaced by its actual query, it still fails
SELECT l_suppkey, count(*) FROM
(SELECT l_suppkey, l_shipdate, count(*)
FROM (SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR') asi
GROUP BY l_suppkey, l_shipdate) supps
GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries without group by clause are not supported yet
-- repartition query on view with single table subquery
CREATE VIEW supp_count_view AS SELECT * FROM (SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY 1) s1;
SELECT * FROM supp_count_view ORDER BY 2 DESC, 1 LIMIT 10;
l_suppkey | count
-----------+-------
6104 | 8
1868 | 6
5532 | 6
5849 | 6
6169 | 6
6669 | 6
6692 | 6
7703 | 6
7869 | 6
8426 | 6
(10 rows)
SET citus.task_executor_type to DEFAULT;
-- create a view with aggregate
CREATE VIEW lineitems_by_shipping_method AS
SELECT l_shipmode, count(*) as cnt FROM lineitem_hash_part GROUP BY 1;
-- the following is supported via recursive planning
SELECT * FROM lineitems_by_shipping_method ORDER BY 1,2 LIMIT 5;
l_shipmode | cnt
------------+------
AIR | 1706
FOB | 1709
MAIL | 1739
RAIL | 1706
REG AIR | 1679
(5 rows)
-- create a view with group by on partition column
CREATE VIEW lineitems_by_orderkey AS
SELECT
l_orderkey, count(*)
FROM
lineitem_hash_part
GROUP BY 1;
-- this should work since we're able to push down this query
SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10;
l_orderkey | count
------------+-------
7 | 7
68 | 7
129 | 7
164 | 7
194 | 7
225 | 7
226 | 7
322 | 7
326 | 7
354 | 7
(10 rows)
-- it also works since it is made router plannable
SELECT * FROM lineitems_by_orderkey WHERE l_orderkey = 100;
l_orderkey | count
------------+-------
100 | 5
(1 row)
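-- the l_orderkey = 100 filter is on the distribution column, so the query
-- prunes to a single shard and runs as one router task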
DROP TABLE temp_lineitem CASCADE;
DROP VIEW supp_count_view;
DROP VIEW lineitems_by_orderkey;
DROP VIEW lineitems_by_shipping_method;
DROP VIEW air_shipped_lineitems;
DROP VIEW priority_lineitem;
DROP VIEW priority_orders;
-- new tests for a real-time use case including views and subqueries
-- create a view of recent users who have activity after a given timestamp
CREATE VIEW recent_users AS
SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
HAVING max(time) > '2017-11-23 16:20:33.264457'::timestamp order by 2 DESC;
SELECT * FROM recent_users ORDER BY 2 DESC, 1 DESC;
user_id | lastseen
---------+---------------------------------
1 | Thu Nov 23 17:30:34.635085 2017
3 | Thu Nov 23 17:18:51.048758 2017
5 | Thu Nov 23 16:48:32.08896 2017
(3 rows)
-- create a view for recent_events
CREATE VIEW recent_events AS
SELECT user_id, time FROM events_table
WHERE time > '2017-11-23 16:20:33.264457'::timestamp;
SELECT count(*) FROM recent_events;
count
-------
6
(1 row)
-- count number of events of recent_users
SELECT count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id);
count
-------
50
(1 row)
-- count the number of events per recent user, ordered by count
SELECT ru.user_id, count(*)
FROM recent_users ru
JOIN events_table et
ON (ru.user_id = et.user_id)
GROUP BY ru.user_id
ORDER BY 2 DESC, 1;
user_id | count
---------+-------
3 | 21
1 | 15
5 | 14
(3 rows)
-- the same query with a left join, however, still generates the same result
SELECT ru.user_id, count(*)
FROM recent_users ru
LEFT JOIN events_table et
ON (ru.user_id = et.user_id)
GROUP BY ru.user_id
ORDER BY 2 DESC, 1;
user_id | count
---------+-------
3 | 21
1 | 15
5 | 14
(3 rows)
-- the query wrapped inside a subquery needs another top-level ORDER BY
SELECT * FROM
(SELECT ru.user_id, count(*)
FROM recent_users ru
JOIN events_table et
ON (ru.user_id = et.user_id)
GROUP BY ru.user_id
ORDER BY 2 DESC, 1) s1
ORDER BY 2 DESC, 1;
user_id | count
---------+-------
3 | 21
1 | 15
5 | 14
(3 rows)
-- non-partition key joins are supported inside subquery
-- via pull-push execution
SELECT * FROM
(SELECT ru.user_id, count(*)
FROM recent_users ru
JOIN events_table et
ON (ru.user_id = et.event_type)
GROUP BY ru.user_id
ORDER BY 2 DESC, 1) s1
ORDER BY 2 DESC, 1;
user_id | count
---------+-------
1 | 24
3 | 23
5 | 7
(3 rows)
-- join between views
-- recent users who has an event in recent events
SELECT ru.user_id FROM recent_users ru JOIN recent_events re USING(user_id) GROUP BY ru.user_id ORDER BY ru.user_id;
user_id
---------
1
3
(2 rows)
-- outer join inside a subquery
-- rows of recent_events that were not generated by recent users
SELECT count(*) FROM (
SELECT re.*, ru.user_id AS recent_user
FROM recent_events re LEFT JOIN recent_users ru USING(user_id)) reu
WHERE recent_user IS NULL;
count
-------
2
(1 row)
-- same query with anti-join
SELECT count(*)
FROM recent_events re LEFT JOIN recent_users ru ON(ru.user_id = re.user_id)
WHERE ru.user_id IS NULL;
count
-------
2
(1 row)
-- join between view and table
-- users who have recent activity and an entry with value_1 less than 3
SELECT ut.* FROM recent_users ru JOIN users_table ut USING (user_id) WHERE ut.value_1 < 3 ORDER BY 1,2;
user_id | time | value_1 | value_2 | value_3 | value_4
---------+---------------------------------+---------+---------+---------+---------
1 | Thu Nov 23 09:26:42.145043 2017 | 1 | 3 | 3 |
3 | Wed Nov 22 18:43:51.450263 2017 | 1 | 1 | 4 |
3 | Wed Nov 22 20:43:31.008625 2017 | 1 | 3 | 2 |
3 | Thu Nov 23 00:15:45.610845 2017 | 1 | 1 | 4 |
3 | Thu Nov 23 03:23:24.702501 2017 | 1 | 2 | 5 |
3 | Thu Nov 23 06:20:05.854857 2017 | 1 | 4 | 2 |
3 | Thu Nov 23 09:57:41.540228 2017 | 2 | 2 | 3 |
3 | Thu Nov 23 11:18:53.114408 2017 | 2 | 2 | 0 |
3 | Thu Nov 23 12:56:49.29191 2017 | 0 | 5 | 1 |
3 | Thu Nov 23 17:18:51.048758 2017 | 1 | 5 | 5 |
5 | Wed Nov 22 20:43:18.667473 2017 | 0 | 3 | 2 |
5 | Wed Nov 22 21:02:07.575129 2017 | 2 | 0 | 2 |
5 | Wed Nov 22 22:10:24.315371 2017 | 1 | 2 | 1 |
5 | Thu Nov 23 00:54:44.192608 2017 | 1 | 3 | 2 |
5 | Thu Nov 23 07:47:09.542999 2017 | 1 | 4 | 3 |
5 | Thu Nov 23 09:05:08.53142 2017 | 2 | 2 | 2 |
5 | Thu Nov 23 09:17:47.706703 2017 | 2 | 5 | 3 |
5 | Thu Nov 23 10:15:31.764558 2017 | 2 | 2 | 2 |
5 | Thu Nov 23 14:29:02.557934 2017 | 2 | 1 | 2 |
5 | Thu Nov 23 15:55:08.493462 2017 | 0 | 3 | 3 |
5 | Thu Nov 23 16:28:38.455322 2017 | 2 | 5 | 4 |
(21 rows)
-- determine whether each recent user has done a given event type
SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done_event
FROM recent_users ru
LEFT JOIN events_table et
ON(ru.user_id = et.user_id AND et.event_type = 6)
ORDER BY 2 DESC, 1;
user_id | done_event
---------+------------
1 | YES
3 | NO
5 | NO
(3 rows)
-- view vs table join wrapped inside a subquery
SELECT * FROM
(SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done_event
FROM recent_users ru
LEFT JOIN events_table et
ON(ru.user_id = et.user_id AND et.event_type = 6)
) s1
ORDER BY 2 DESC, 1;
user_id | done_event
---------+------------
1 | YES
3 | NO
5 | NO
(3 rows)
-- view vs table non-partition-key join is not supported
-- given that we cannot recursively plan tables yet
SELECT * FROM
(SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done_event
FROM recent_users ru
LEFT JOIN events_table et
ON(ru.user_id = et.event_type)
) s1
ORDER BY 2 DESC, 1;
ERROR: cannot pushdown the subquery
DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join
-- create a select only view
CREATE VIEW selected_users AS SELECT * FROM users_table WHERE value_1 >= 1 and value_1 <3;
CREATE VIEW recent_selected_users AS SELECT su.* FROM selected_users su JOIN recent_users ru USING(user_id);
SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1;
user_id
---------
1
3
5
(3 rows)
-- this would be supported when we implement where partition_key in (subquery) support
SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users) GROUP BY 1,2 ORDER BY 1 DESC,2 DESC LIMIT 5;
user_id | time
---------+---------------------------------
5 | Thu Nov 23 16:11:02.929469 2017
5 | Thu Nov 23 14:40:40.467511 2017
5 | Thu Nov 23 14:28:51.833214 2017
5 | Thu Nov 23 14:23:09.889786 2017
5 | Thu Nov 23 13:26:45.571108 2017
(5 rows)
-- it is supported when it is a router query
SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users WHERE user_id = 1);
count
-------
15
(1 row)
-- union between views is supported through recursive planning
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users)
ORDER BY 1;
user_id
---------
1
2
3
4
5
6
(6 rows)
-- wrapping it inside a SELECT * works
SELECT *
FROM (
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0
ORDER BY user_id;
user_id
---------
1
(1 row)
-- union all also works for views
SELECT *
FROM (
(SELECT user_id FROM recent_users)
UNION ALL
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0
ORDER BY user_id;
user_id
---------
1
1
(2 rows)
SELECT count(*)
FROM (
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0;
count
-------
1
(1 row)
-- UNION ALL between views is supported through recursive planning
SELECT count(*)
FROM (
(SELECT user_id FROM recent_users)
UNION ALL
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0;
count
-------
2
(1 row)
-- expand view definitions and re-run last 2 queries
SELECT count(*)
FROM (
(SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
HAVING max(time) > '2017-11-22 05:45:49.978738'::timestamp order by 2 DESC) aa
)
UNION
(SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u
WHERE user_id < 2 AND user_id > 0;
count
-------
1
(1 row)
SELECT count(*)
FROM (
(SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
HAVING max(time) > '2017-11-22 05:45:49.978738'::timestamp order by 2 DESC) aa
)
UNION ALL
(SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u
WHERE user_id < 2 AND user_id > 0;
count
-------
2
(1 row)
-- test distinct
-- distinct is supported if it is on a partition key
CREATE VIEW distinct_user_with_value_1_3 AS SELECT DISTINCT user_id FROM users_table WHERE value_1 = 3;
SELECT * FROM distinct_user_with_value_1_3 ORDER BY user_id;
user_id
---------
1
2
3
4
5
6
(6 rows)
-- distinct on a non-partition key cannot be pushed down
-- but is supported via recursive planning
CREATE VIEW distinct_value_1 AS SELECT DISTINCT value_1 FROM users_table WHERE value_2 = 3;
SELECT * FROM distinct_value_1 ORDER BY 1 DESC LIMIT 5;
value_1
---------
5
4
3
2
1
(5 rows)
-- CTEs are supported even if they are on views
CREATE VIEW cte_view_1 AS
WITH c1 AS (SELECT * FROM users_table WHERE value_1 = 3) SELECT * FROM c1 WHERE value_2 < 4 AND EXISTS (SELECT * FROM c1);
SELECT * FROM cte_view_1 ORDER BY 1,2,3,4,5 LIMIT 5;
user_id | time | value_1 | value_2 | value_3 | value_4
---------+---------------------------------+---------+---------+---------+---------
1 | Thu Nov 23 03:32:50.803031 2017 | 3 | 2 | 1 |
2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 |
3 | Wed Nov 22 23:24:32.080584 2017 | 3 | 2 | 5 |
4 | Wed Nov 22 23:59:46.493416 2017 | 3 | 1 | 3 |
4 | Thu Nov 23 01:55:21.824618 2017 | 3 | 1 | 4 |
(5 rows)
-- this is a single-shard query, but the router planner can't detect that
-- through the view + CTE
SELECT * FROM cte_view_1 WHERE user_id = 2 ORDER BY 1,2,3,4,5;
user_id | time | value_1 | value_2 | value_3 | value_4
---------+--------------------------------+---------+---------+---------+---------
2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 |
(1 row)
-- if the CTE itself prunes down to a single shard, then the view is supported (router plannable)
CREATE VIEW cte_view_2 AS
WITH c1 AS (SELECT * FROM users_table WHERE user_id = 2) SELECT * FROM c1 WHERE value_1 = 3;
SELECT * FROM cte_view_2;
user_id | time | value_1 | value_2 | value_3 | value_4
---------+---------------------------------+---------+---------+---------+---------
2 | Thu Nov 23 00:19:14.138058 2017 | 3 | 4 | 0 |
2 | Thu Nov 23 13:52:54.83829 2017 | 3 | 1 | 4 |
2 | Wed Nov 22 18:19:49.944985 2017 | 3 | 5 | 1 |
2 | Thu Nov 23 11:41:04.042936 2017 | 3 | 4 | 1 |
(4 rows)
CREATE VIEW router_view AS SELECT * FROM users_table WHERE user_id = 2;
-- router plannable
SELECT user_id FROM router_view GROUP BY 1;
user_id
---------
2
(1 row)
-- join a router view
SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN recent_events USING (user_id) ORDER BY 2 LIMIT 3;
user_id | time
---------+---------------------------------
2 | Thu Nov 23 17:26:14.563216 2017
(1 row)
SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN (SELECT * FROM recent_events) re USING (user_id) ORDER BY 2 LIMIT 3;
user_id | time
---------+---------------------------------
2 | Thu Nov 23 17:26:14.563216 2017
(1 row)
-- views with limits
CREATE VIEW recent_10_users AS
SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
ORDER BY lastseen DESC
LIMIT 10;
-- the view has a limit in it and subquery_pushdown is not set, so this goes
-- through recursive planning
SELECT * FROM recent_10_users;
user_id | lastseen
---------+---------------------------------
1 | Thu Nov 23 17:30:34.635085 2017
3 | Thu Nov 23 17:18:51.048758 2017
5 | Thu Nov 23 16:48:32.08896 2017
4 | Thu Nov 23 15:32:02.360969 2017
6 | Thu Nov 23 14:43:18.024104 2017
2 | Thu Nov 23 13:52:54.83829 2017
(6 rows)
SET citus.subquery_pushdown to ON;
-- still not supported since the outer query does not have a limit
-- it shows a different error message (subquery with a single relation)
SELECT * FROM recent_10_users;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries with limit are not supported yet
-- now it displays a more accurate error message
SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id);
ERROR: cannot push down this subquery
DETAIL: Limit in subquery without limit in the outermost query is unsupported
-- now both are supported when there is a limit on the outermost query
SELECT * FROM recent_10_users ORDER BY lastseen DESC LIMIT 10;
user_id | lastseen
---------+---------------------------------
1 | Thu Nov 23 17:30:34.635085 2017
3 | Thu Nov 23 17:18:51.048758 2017
5 | Thu Nov 23 16:48:32.08896 2017
4 | Thu Nov 23 15:32:02.360969 2017
6 | Thu Nov 23 14:43:18.024104 2017
2 | Thu Nov 23 13:52:54.83829 2017
(6 rows)
SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10;
user_id | time | event_type | value_2 | value_3 | value_4
---------+---------------------------------+------------+---------+---------+---------
1 | Thu Nov 23 21:54:46.924477 2017 | 6 | 4 | 5 |
4 | Thu Nov 23 18:10:21.338399 2017 | 1 | 2 | 4 |
3 | Thu Nov 23 18:08:26.550729 2017 | 2 | 4 | 3 |
2 | Thu Nov 23 17:26:14.563216 2017 | 1 | 5 | 3 |
3 | Thu Nov 23 16:44:41.903713 2017 | 4 | 2 | 2 |
3 | Thu Nov 23 16:31:56.219594 2017 | 5 | 1 | 2 |
4 | Thu Nov 23 16:20:33.264457 2017 | 0 | 0 | 3 |
5 | Thu Nov 23 16:11:02.929469 2017 | 4 | 2 | 0 |
2 | Thu Nov 23 15:58:49.273421 2017 | 5 | 1 | 2 |
5 | Thu Nov 23 14:40:40.467511 2017 | 1 | 4 | 1 |
(10 rows)
RESET citus.subquery_pushdown;
VACUUM ANALYZE users_table;
-- explain tests
EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: remote_scan.user_id
-> HashAggregate
Group Key: remote_scan.user_id
-> Custom Scan (Citus Real-Time)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> HashAggregate
Group Key: users_table.user_id
-> Hash Join
Hash Cond: (users_table.user_id = ru.user_id)
-> Seq Scan on users_table_1400256 users_table
Filter: ((value_1 >= 1) AND (value_1 < 3))
-> Hash
-> Subquery Scan on ru
-> Sort
Sort Key: (max(users_table_1."time")) DESC
-> HashAggregate
Group Key: users_table_1.user_id
Filter: (max(users_table_1."time") > '2017-11-23 16:20:33.264457'::timestamp without time zone)
-> Seq Scan on users_table_1400256 users_table_1
(23 rows)
EXPLAIN (COSTS FALSE) SELECT *
FROM (
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 4 AND user_id > 1
ORDER BY user_id;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: remote_scan.user_id
-> Custom Scan (Citus Real-Time)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> Unique
-> Sort
Sort Key: recent_users.user_id
-> Append
-> Subquery Scan on recent_users
-> Sort
Sort Key: (max(users_table."time")) DESC
-> GroupAggregate
Group Key: users_table.user_id
Filter: (max(users_table."time") > '2017-11-23 16:20:33.264457'::timestamp without time zone)
-> Sort
Sort Key: users_table.user_id
-> Seq Scan on users_table_1400256 users_table
Filter: ((user_id < 4) AND (user_id > 1))
-> Seq Scan on users_table_1400256 users_table_1
Filter: ((value_1 >= 1) AND (value_1 < 3) AND (user_id < 4) AND (user_id > 1))
(23 rows)
EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan."time" DESC
-> Custom Scan (Citus Real-Time)
-> Distributed Subplan 98_1
-> Limit
-> Sort
Sort Key: max((max(remote_scan.lastseen))) DESC
-> HashAggregate
Group Key: remote_scan.user_id
-> Custom Scan (Citus Real-Time)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> Limit
-> Sort
Sort Key: (max("time")) DESC
-> HashAggregate
Group Key: user_id
-> Seq Scan on users_table_1400256 users_table
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> Limit
-> Sort
Sort Key: et."time" DESC
-> Hash Join
Hash Cond: (intermediate_result.user_id = et.user_id)
-> Function Scan on read_intermediate_result intermediate_result
-> Hash
-> Seq Scan on events_table_1400260 et
(33 rows)
SET citus.subquery_pushdown to ON;
EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan."time" DESC
-> Custom Scan (Citus Real-Time)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> Limit
-> Sort
Sort Key: et."time" DESC
-> Hash Join
Hash Cond: (et.user_id = recent_10_users.user_id)
-> Seq Scan on events_table_1400260 et
-> Hash
-> Subquery Scan on recent_10_users
-> Limit
-> Sort
Sort Key: (max(users_table."time")) DESC
-> HashAggregate
Group Key: users_table.user_id
-> Seq Scan on users_table_1400256 users_table
(22 rows)
RESET citus.subquery_pushdown;
DROP VIEW recent_10_users;
DROP VIEW router_view;
DROP VIEW cte_view_2;
DROP VIEW cte_view_1;
DROP VIEW distinct_value_1;
DROP VIEW distinct_user_with_value_1_3;
DROP VIEW recent_selected_users;
DROP VIEW selected_users;
DROP VIEW recent_events;
DROP VIEW recent_users;

View File

@ -1,13 +1,6 @@
---
--- tests around access tracking within transaction blocks
---
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 10 AS version_ten_or_above;
version_ten_or_above
----------------------
t
(1 row)
CREATE SCHEMA access_tracking;
SET search_path TO 'access_tracking';
CREATE OR REPLACE FUNCTION relation_select_access_mode(relationId Oid)
@ -46,17 +39,17 @@ BEGIN
RETURN 'not_accessed';
ELSIF relationShardAccess = 1 THEN
RETURN 'reference_table_access';
ELSE
ELSE
RETURN 'parallel_access';
END IF;
END;
$$ LANGUAGE 'plpgsql' IMMUTABLE;
CREATE VIEW relation_acesses AS
SELECT table_name,
CREATE VIEW relation_acesses AS
SELECT table_name,
relation_access_mode_to_text(table_name, relation_select_access_mode(table_name::regclass)) as select_access,
relation_access_mode_to_text(table_name, relation_dml_access_mode(table_name::regclass)) as dml_access,
relation_access_mode_to_text(table_name, relation_ddl_access_mode(table_name::regclass)) as ddl_access
FROM
FROM
((SELECT 'table_' || i as table_name FROM generate_series(1, 7) i) UNION (SELECT 'partitioning_test') UNION (SELECT 'partitioning_test_2009') UNION (SELECT 'partitioning_test_2010')) tables;
SET citus.shard_replication_factor TO 1;
CREATE TABLE table_1 (key int, value int);
@ -136,7 +129,7 @@ SELECT * FROM relation_acesses WHERE table_name = 'table_1';
table_1 | not_parallel_accessed | not_parallel_accessed | not_parallel_accessed
(1 row)
-- a very simple test that first checks sequential
-- a very simple test that first checks sequential
-- and parallel SELECTs,DMLs, and DDLs
BEGIN;
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
@ -246,12 +239,12 @@ BEGIN;
ROLLBACK;
-- a simple join touches a single shard per table
BEGIN;
SELECT
count(*)
FROM
SELECT
count(*)
FROM
table_1, table_2, table_3, table_4, table_5
WHERE
table_1.key = table_2.key AND table_2.key = table_3.key AND
table_1.key = table_2.key AND table_2.key = table_3.key AND
table_3.key = table_4.key AND table_4.key = table_5.key AND
table_1.key = 1;
count
@ -274,9 +267,9 @@ BEGIN;
ROLLBACK;
-- a simple real-time join touches all shards per table
BEGIN;
SELECT
count(*)
FROM
SELECT
count(*)
FROM
table_1, table_2
WHERE
table_1.key = table_2.key;
@ -297,9 +290,9 @@ ROLLBACK;
-- in sequential mode
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT
count(*)
FROM
SELECT
count(*)
FROM
table_1, table_2
WHERE
table_1.key = table_2.key;
@ -318,16 +311,16 @@ BEGIN;
ROLLBACK;
-- a simple subquery pushdown that touches all shards
BEGIN;
SELECT
count(*)
FROM
SELECT
count(*)
FROM
(
SELECT
SELECT
random()
FROM
FROM
table_1, table_2, table_3, table_4, table_5
WHERE
table_1.key = table_2.key AND table_2.key = table_3.key AND
table_1.key = table_2.key AND table_2.key = table_3.key AND
table_3.key = table_4.key AND table_4.key = table_5.key
) as foo;
count
@ -349,7 +342,7 @@ BEGIN;
ROLLBACK;
-- simple multi shard update both sequential and parallel modes
-- note that in multi shard modify mode we always add select
-- note that in multi shard modify mode we always add select
-- access for all the shards accessed. But, sequential mode is OK
BEGIN;
UPDATE table_1 SET value = 15;
@ -371,8 +364,8 @@ BEGIN;
ROLLBACK;
-- now UPDATE/DELETE with subselect pushdown
BEGIN;
UPDATE
table_1 SET value = 15
UPDATE
table_1 SET value = 15
WHERE key IN (SELECT key FROM table_2 JOIN table_3 USING (key) WHERE table_2.value = 15);
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1;
table_name | select_access | dml_access | ddl_access
@ -417,19 +410,18 @@ BEGIN;
(2 rows)
ROLLBACK;
-- recursively planned SELECT
BEGIN;
SELECT
count(*)
FROM
SELECT
count(*)
FROM
(
SELECT
SELECT
random()
FROM
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
table_1.key = table_2.key
OFFSET 0
) as foo;
count
@ -448,16 +440,16 @@ ROLLBACK;
-- recursively planned SELECT and coordinator INSERT .. SELECT
BEGIN;
INSERT INTO table_3 (key)
SELECT
SELECT
*
FROM
FROM
(
SELECT
SELECT
random() * 1000
FROM
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
table_1.key = table_2.key
OFFSET 0
) as foo;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1;
@ -469,17 +461,17 @@ BEGIN;
(3 rows)
ROLLBACK;
-- recursively planned SELECT and coordinator INSERT .. SELECT
-- recursively planned SELECT and coordinator INSERT .. SELECT
-- but modifies single shard, marked as sequential operation
BEGIN;
INSERT INTO table_3 (key)
SELECT
SELECT
*
FROM
FROM
(
SELECT
SELECT
random() * 1000
FROM
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
@ -499,16 +491,16 @@ ROLLBACK;
BEGIN;
DELETE FROM table_3 where key IN
(
SELECT
SELECT
*
FROM
FROM
(
SELECT
SELECT
table_1.key
FROM
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
table_1.key = table_2.key
OFFSET 0
) as foo
) AND value IN (SELECT key FROM table_4);
@ -576,7 +568,7 @@ BEGIN;
table_6 | reference_table_access | reference_table_access | not_accessed
(1 row)
ALTER TABLE table_6 ADD COLUMN x INT;
ALTER TABLE table_6 ADD COLUMN x INT;
SELECT * FROM relation_acesses WHERE table_name IN ('table_6');
table_name | select_access | dml_access | ddl_access
------------+------------------------+------------------------+------------------------
@ -887,9 +879,8 @@ NOTICE: truncate cascades to table "table_2"
(2 rows)
ROLLBACK;
-- CTEs with SELECT only should work fine
-- CTEs with SELECT only should work fine
BEGIN;
WITH cte AS (SELECT count(*) FROM table_1)
SELECT * FROM cte;
count
@ -904,7 +895,7 @@ BEGIN;
(1 row)
COMMIT;
-- CTEs with SELECT only in sequential mode should work fine
-- CTEs with SELECT only in sequential mode should work fine
BEGIN;
SET LOCAL citus.multi_shard_modify_mode = 'sequential';
WITH cte AS (SELECT count(*) FROM table_1)
@ -923,7 +914,6 @@ BEGIN;
COMMIT;
-- modifying CTEs should work fine with multi-row inserts, which by default run in sequential mode
BEGIN;
WITH cte_1 AS (INSERT INTO table_1 VALUES (1000,1000), (1001, 1001), (1002, 1002) RETURNING *)
SELECT * FROM cte_1 ORDER BY 1;
key | value
@ -942,7 +932,6 @@ BEGIN;
ROLLBACK;
-- modifying CTEs should work fine with parallel mode
BEGIN;
WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *)
SELECT count(*) FROM cte_1 ORDER BY 1;
count
@ -959,7 +948,6 @@ BEGIN;
ROLLBACK;
-- modifying CTEs should work fine with sequential mode
BEGIN;
WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *)
SELECT count(*) FROM cte_1 ORDER BY 1;
count
@ -974,13 +962,13 @@ BEGIN;
(1 row)
ROLLBACK;
-- create distributed table with data loading
-- create distributed table with data loading
-- should mark both parallel dml and parallel ddl
DROP TABLE table_3;
CREATE TABLE table_3 (key int, value int);
INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i;
BEGIN;
SELECT create_distributed_table('table_3', 'key');
SELECT create_distributed_table('table_3', 'key');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------

View File

@ -6,24 +6,16 @@ CREATE SCHEMA partitioned_table_replicated;
SET search_path TO partitioned_table_replicated;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
-- print major version number for version-specific tests
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int < 11 AS server_version_is_10;
server_version_is_10
----------------------
f
(1 row)
CREATE TABLE collections (
key bigint,
ts timestamptz,
collection_id integer,
value numeric
value numeric
) PARTITION BY LIST ( collection_id );
CREATE TABLE collections_1
CREATE TABLE collections_1
PARTITION OF collections (key, ts, collection_id, value)
FOR VALUES IN ( 1 );
CREATE TABLE collections_2
CREATE TABLE collections_2
PARTITION OF collections (key, ts, collection_id, value)
FOR VALUES IN ( 2 );
-- load some data
@ -46,7 +38,7 @@ CREATE TABLE collections_3 PARTITION OF collections FOR VALUES IN ( 3 );
-- now attaching a non-distributed table to a distributed table
CREATE TABLE collections_4 AS SELECT * FROM collections LIMIT 0;
-- load some data
INSERT INTO collections_4 SELECT i, '2009-01-01', 4, i FROM generate_series (0, 10) i;
INSERT INTO collections_4 SELECT i, '2009-01-01', 4, i FROM generate_series (0, 10) i;
ALTER TABLE collections ATTACH PARTITION collections_4 FOR VALUES IN ( 4 );
NOTICE: Copying data from local table...
-- finally attach a distributed table to a distributed table
@ -58,15 +50,15 @@ SELECT create_distributed_table('collections_5', 'key');
(1 row)
-- load some data
INSERT INTO collections_5 SELECT i, '2009-01-01', 5, i FROM generate_series (0, 10) i;
INSERT INTO collections_5 SELECT i, '2009-01-01', 5, i FROM generate_series (0, 10) i;
ALTER TABLE collections ATTACH PARTITION collections_5 FOR VALUES IN ( 5 );
-- make sure that we have all the placements
SELECT
SELECT
logicalrelid, count(*) as placement_count
FROM
FROM
pg_dist_shard, pg_dist_shard_placement
WHERE
logicalrelid::text LIKE '%collections%' AND
WHERE
logicalrelid::text LIKE '%collections%' AND
pg_dist_shard.shardid = pg_dist_shard_placement.shardid
GROUP BY
logicalrelid
@ -83,11 +75,11 @@ ORDER BY
(6 rows)
-- and, make sure that all tables are colocated
SELECT
count(DISTINCT colocationid)
FROM
pg_dist_partition
WHERE
SELECT
count(DISTINCT colocationid)
FROM
pg_dist_partition
WHERE
logicalrelid::text LIKE '%collections%';
count
-------
@ -127,7 +119,7 @@ ERROR: modifications on partitions when replication factor is greater than 1 is
HINT: Run the query on the parent table "collections" instead.
\.
invalid command \.
-- DDLs are not allowed
-- DDLs are not allowed
CREATE INDEX index_on_partition ON collections_1(key);
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
@ -143,7 +135,7 @@ TRUNCATE collections, collections_1;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- modifying CTEs are also not allowed
WITH collections_5_cte AS
WITH collections_5_cte AS
(
DELETE FROM collections_5 RETURNING *
)
@ -159,10 +151,10 @@ SELECT create_distributed_table('fkey_test', 'key');
(1 row)
ALTER TABLE
collections_5
ADD CONSTRAINT
fkey_delete FOREIGN KEY(key)
REFERENCES
collections_5
ADD CONSTRAINT
fkey_delete FOREIGN KEY(key)
REFERENCES
fkey_test(key) ON DELETE CASCADE;
ERROR: cannot create foreign key constraint
DETAIL: Citus Community Edition currently supports foreign key constraints only for "citus.shard_replication_factor = 1".
@ -205,7 +197,7 @@ INSERT INTO collections_agg SELECT collection_id, sum(key) FROM collections_1 GR
-- now make sure that repair functionality works fine
-- create a table and create its distribution metadata
CREATE TABLE customer_engagements (id integer, event_id int) PARTITION BY LIST ( event_id );
CREATE TABLE customer_engagements_1
CREATE TABLE customer_engagements_1
PARTITION OF customer_engagements
FOR VALUES IN ( 1 );
CREATE TABLE customer_engagements_2
@ -233,7 +225,7 @@ INSERT INTO customer_engagements VALUES (2, 2);
-- the following queries does the following:
-- (i) create a new shard
-- (ii) mark the second shard placements as unhealthy
-- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones
-- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones
-- (iv) do a successful master_copy_shard_placement from the first placement to the second
-- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
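For reference, the repair flow that steps (i)-(v) describe reduces to marking a placement unhealthy in pg_dist_placement and copying the shard from the healthy placement; a minimal sketch, reusing the :newshardid and port variables this test sets up:
-- mark the second placement as unhealthy (shardstate 3 = invalid)
UPDATE pg_dist_placement SET shardstate = 3
WHERE shardid = :newshardid AND groupid = :worker_2_group;
-- copy the shard from the healthy placement to repair the unhealthy one
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port,
                                   'localhost', :worker_2_port);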

View File

@ -1,299 +0,0 @@
--
-- Distributed Partitioned Table Tests
--
SET citus.next_shard_id TO 1760000;
CREATE SCHEMA partitioned_table_replicated;
SET search_path TO partitioned_table_replicated;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
-- print major version number for version-specific tests
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int < 11 AS server_version_is_10;
server_version_is_10
----------------------
t
(1 row)
CREATE TABLE collections (
key bigint,
ts timestamptz,
collection_id integer,
value numeric
) PARTITION BY LIST ( collection_id );
CREATE TABLE collections_1
PARTITION OF collections (key, ts, collection_id, value)
FOR VALUES IN ( 1 );
CREATE TABLE collections_2
PARTITION OF collections (key, ts, collection_id, value)
FOR VALUES IN ( 2 );
-- load some data
INSERT INTO collections (key, ts, collection_id, value) VALUES (1, '2009-01-01', 1, 1);
INSERT INTO collections (key, ts, collection_id, value) VALUES (2, '2009-01-01', 1, 2);
INSERT INTO collections (key, ts, collection_id, value) VALUES (3, '2009-01-01', 2, 1);
INSERT INTO collections (key, ts, collection_id, value) VALUES (4, '2009-01-01', 2, 2);
-- in the first case, we'll distribute the
-- already existing partitioning hierarchy
SELECT create_distributed_table('collections', 'key');
NOTICE: Copying data from local table...
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
-- now create a partition of an already distributed table
CREATE TABLE collections_3 PARTITION OF collections FOR VALUES IN ( 3 );
-- now attaching a non-distributed table to a distributed table
CREATE TABLE collections_4 AS SELECT * FROM collections LIMIT 0;
-- load some data
INSERT INTO collections_4 SELECT i, '2009-01-01', 4, i FROM generate_series (0, 10) i;
ALTER TABLE collections ATTACH PARTITION collections_4 FOR VALUES IN ( 4 );
NOTICE: Copying data from local table...
-- finally attach a distributed table to a distributed table
CREATE TABLE collections_5 AS SELECT * FROM collections LIMIT 0;
SELECT create_distributed_table('collections_5', 'key');
create_distributed_table
--------------------------
(1 row)
-- load some data
INSERT INTO collections_5 SELECT i, '2009-01-01', 5, i FROM generate_series (0, 10) i;
ALTER TABLE collections ATTACH PARTITION collections_5 FOR VALUES IN ( 5 );
-- make sure that we have all the placements
SELECT
logicalrelid, count(*) as placement_count
FROM
pg_dist_shard, pg_dist_shard_placement
WHERE
logicalrelid::text LIKE '%collections%' AND
pg_dist_shard.shardid = pg_dist_shard_placement.shardid
GROUP BY
logicalrelid
ORDER BY
1,2;
logicalrelid | placement_count
---------------+-----------------
collections | 8
collections_1 | 8
collections_2 | 8
collections_3 | 8
collections_4 | 8
collections_5 | 8
(6 rows)
-- and, make sure that all tables are colocated
SELECT
count(DISTINCT colocationid)
FROM
pg_dist_partition
WHERE
logicalrelid::text LIKE '%collections%';
count
-------
1
(1 row)
-- make sure that any kind of modification is disallowed on partitions
-- given that replication factor > 1
INSERT INTO collections_4 (key, ts, collection_id, value) VALUES (4, '2009-01-01', 2, 2);
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- single shard update/delete not allowed
UPDATE collections_1 SET ts = now() WHERE key = 1;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
DELETE FROM collections_1 WHERE ts = now() AND key = 1;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- multi shard update/delete are not allowed
UPDATE collections_1 SET ts = now();
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
DELETE FROM collections_1 WHERE ts = now();
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- insert..select pushdown
INSERT INTO collections_1 SELECT * FROM collections_1;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- insert..select via coordinator
INSERT INTO collections_1 SELECT * FROM collections_1 OFFSET 0;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- COPY is not allowed
COPY collections_1 FROM STDIN;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
\.
invalid command \.
-- DDLs are not allowed
CREATE INDEX index_on_partition ON collections_1(key);
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- EXPLAIN with modifications is not allowed as well
UPDATE collections_1 SET ts = now() WHERE key = 1;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- TRUNCATE is also not allowed
TRUNCATE collections_1;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
TRUNCATE collections, collections_1;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- modifying CTEs are also not allowed
WITH collections_5_cte AS
(
DELETE FROM collections_5 RETURNING *
)
SELECT * FROM collections_5_cte;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "collections" instead.
-- foreign key creation is disallowed due to replication factor > 1
CREATE TABLE fkey_test (key bigint PRIMARY KEY);
SELECT create_distributed_table('fkey_test', 'key');
create_distributed_table
--------------------------
(1 row)
ALTER TABLE
collections_5
ADD CONSTRAINT
fkey_delete FOREIGN KEY(key)
REFERENCES
fkey_test(key) ON DELETE CASCADE;
ERROR: cannot create foreign key constraint
DETAIL: Citus Community Edition currently supports foreign key constraints only for "citus.shard_replication_factor = 1".
HINT: Please change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us.
-- we should be able to attach and detach partitions
-- given that those DDLs are on the parent table
CREATE TABLE collections_6
PARTITION OF collections (key, ts, collection_id, value)
FOR VALUES IN ( 6 );
ALTER TABLE collections DETACH PARTITION collections_6;
ALTER TABLE collections ATTACH PARTITION collections_6 FOR VALUES IN ( 6 );
-- read queries work just fine
SELECT count(*) FROM collections_1 WHERE key = 1;
count
-------
1
(1 row)
SELECT count(*) FROM collections_1 WHERE key != 1;
count
-------
1
(1 row)
-- rollups SELECT'ing from partitions should work just fine
CREATE TABLE collections_agg (
key bigint,
sum_value numeric
);
SELECT create_distributed_table('collections_agg', 'key');
create_distributed_table
--------------------------
(1 row)
-- pushdown roll-up
INSERT INTO collections_agg SELECT key, sum(key) FROM collections_1 GROUP BY key;
-- coordinator roll-up
INSERT INTO collections_agg SELECT collection_id, sum(key) FROM collections_1 GROUP BY collection_id;
-- now make sure that repair functionality works fine
-- create a table and create its distribution metadata
CREATE TABLE customer_engagements (id integer, event_id int) PARTITION BY LIST ( event_id );
CREATE TABLE customer_engagements_1
PARTITION OF customer_engagements
FOR VALUES IN ( 1 );
CREATE TABLE customer_engagements_2
PARTITION OF customer_engagements
FOR VALUES IN ( 2 );
-- add some indexes
CREATE INDEX ON customer_engagements (id);
ERROR: cannot create index on partitioned table "customer_engagements"
CREATE INDEX ON customer_engagements (event_id);
ERROR: cannot create index on partitioned table "customer_engagements"
CREATE INDEX ON customer_engagements (id, event_id);
ERROR: cannot create index on partitioned table "customer_engagements"
-- distribute the table
-- create a single shard on the first worker
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('customer_engagements', 'id', 'hash');
create_distributed_table
--------------------------
(1 row)
-- ingest some data for the tests
INSERT INTO customer_engagements VALUES (1, 1);
INSERT INTO customer_engagements VALUES (2, 1);
INSERT INTO customer_engagements VALUES (1, 2);
INSERT INTO customer_engagements VALUES (2, 2);
-- the following queries do the following:
-- (i) create a new shard
-- (ii) mark the second shard placements as unhealthy
-- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones
-- (iv) do a successful master_copy_shard_placement from the first placement to the second
-- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
-- get the newshardid
SELECT shardid as newshardid FROM pg_dist_shard WHERE logicalrelid = 'customer_engagements'::regclass
\gset
-- now, update the second placement as unhealthy
UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid
AND groupid = :worker_2_group;
-- cannot repair a shard after a modification (transaction still open during repair)
BEGIN;
INSERT INTO customer_engagements VALUES (1, 1);
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
ERROR: cannot open new connections after the first modification command within a transaction
ROLLBACK;
-- modifications after repairing a shard are fine (will use new metadata)
BEGIN;
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
master_copy_shard_placement
-----------------------------
(1 row)
ALTER TABLE customer_engagements ADD COLUMN value float DEFAULT 1.0;
SELECT * FROM customer_engagements ORDER BY 1,2,3;
id | event_id | value
----+----------+-------
1 | 1 | 1
1 | 2 | 1
2 | 1 | 1
2 | 2 | 1
(4 rows)
ROLLBACK;
BEGIN;
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
master_copy_shard_placement
-----------------------------
(1 row)
INSERT INTO customer_engagements VALUES (1, 1);
SELECT count(*) FROM customer_engagements;
count
-------
5
(1 row)
ROLLBACK;
-- TRUNCATE is allowed on the parent table
-- try it just before dropping the table
TRUNCATE collections;
SET search_path TO public;
DROP SCHEMA partitioned_table_replicated CASCADE;
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table partitioned_table_replicated.collections
drop cascades to table partitioned_table_replicated.fkey_test
drop cascades to table partitioned_table_replicated.collections_agg
drop cascades to table partitioned_table_replicated.customer_engagements

View File

@ -3,14 +3,6 @@
--
-- Tests basic PROCEDURE functionality with SQL and PLPGSQL procedures.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
(1 row)
SET citus.next_shard_id TO 100500;
CREATE SCHEMA procedure_schema;
SET SEARCH_PATH = procedure_schema;

View File

@ -1,284 +0,0 @@
--
-- SQL_PROCEDURE
--
-- Tests basic PROCEDURE functionality with SQL and PLPGSQL procedures.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
f
(1 row)
SET citus.next_shard_id TO 100500;
CREATE SCHEMA procedure_schema;
SET SEARCH_PATH = procedure_schema;
CREATE TABLE test_table(id integer , org_id integer);
CREATE UNIQUE INDEX idx_table ON test_table(id, org_id);
SELECT create_distributed_table('test_table','id');
create_distributed_table
--------------------------
(1 row)
INSERT INTO test_table VALUES(1, 1);
-- test CREATE PROCEDURE
CREATE PROCEDURE test_procedure_delete_insert(id int, org_id int) LANGUAGE SQL AS $$
DELETE FROM test_table;
INSERT INTO test_table VALUES(id, org_id);
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE test_procedure_delete_insert(id int, org_id...
^
CALL test_procedure_delete_insert(2,3);
ERROR: syntax error at or near "CALL"
LINE 1: CALL test_procedure_delete_insert(2,3);
^
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
----+--------
1 | 1
(1 row)
-- commit/rollback is not allowed in procedures in SQL
-- following calls should fail
CREATE PROCEDURE test_procedure_commit(tt_id int, tt_org_id int) LANGUAGE SQL AS $$
DELETE FROM test_table;
COMMIT;
INSERT INTO test_table VALUES(tt_id, -1);
UPDATE test_table SET org_id = tt_org_id WHERE id = tt_id;
COMMIT;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE test_procedure_commit(tt_id int, tt_org_id ...
^
CALL test_procedure_commit(2,5);
ERROR: syntax error at or near "CALL"
LINE 1: CALL test_procedure_commit(2,5);
^
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
----+--------
1 | 1
(1 row)
CREATE PROCEDURE test_procedure_rollback(tt_id int, tt_org_id int) LANGUAGE SQL AS $$
DELETE FROM test_table;
ROLLBACK;
UPDATE test_table SET org_id = tt_org_id WHERE id = tt_id;
COMMIT;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE test_procedure_rollback(tt_id int, tt_org_i...
^
CALL test_procedure_rollback(2,15);
ERROR: syntax error at or near "CALL"
LINE 1: CALL test_procedure_rollback(2,15);
^
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
----+--------
1 | 1
(1 row)
DROP PROCEDURE test_procedure_delete_insert(int, int);
ERROR: syntax error at or near "PROCEDURE"
LINE 1: DROP PROCEDURE test_procedure_delete_insert(int, int);
^
DROP PROCEDURE test_procedure_commit(int, int);
ERROR: syntax error at or near "PROCEDURE"
LINE 1: DROP PROCEDURE test_procedure_commit(int, int);
^
DROP PROCEDURE test_procedure_rollback(int, int);
ERROR: syntax error at or near "PROCEDURE"
LINE 1: DROP PROCEDURE test_procedure_rollback(int, int);
^
-- same tests with plpgsql
-- test CREATE PROCEDURE
CREATE PROCEDURE test_procedure_delete_insert(id int, org_id int) LANGUAGE PLPGSQL AS $$
BEGIN
DELETE FROM test_table;
INSERT INTO test_table VALUES(id, org_id);
END;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE test_procedure_delete_insert(id int, org_id...
^
CALL test_procedure_delete_insert(2,3);
ERROR: syntax error at or near "CALL"
LINE 1: CALL test_procedure_delete_insert(2,3);
^
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
----+--------
1 | 1
(1 row)
-- notice that the update succeeded and was committed
CREATE PROCEDURE test_procedure_modify_insert(tt_id int, tt_org_id int) LANGUAGE PLPGSQL AS $$
BEGIN
UPDATE test_table SET org_id = tt_org_id WHERE id = tt_id;
COMMIT;
INSERT INTO test_table VALUES (tt_id, tt_org_id);
ROLLBACK;
END;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE test_procedure_modify_insert(tt_id int, tt_...
^
CALL test_procedure_modify_insert(2,12);
ERROR: syntax error at or near "CALL"
LINE 1: CALL test_procedure_modify_insert(2,12);
^
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
----+--------
1 | 1
(1 row)
CREATE PROCEDURE test_procedure_modify_insert_commit(tt_id int, tt_org_id int) LANGUAGE PLPGSQL AS $$
BEGIN
UPDATE test_table SET org_id = tt_org_id WHERE id = tt_id;
COMMIT;
INSERT INTO test_table VALUES (tt_id, tt_org_id);
COMMIT;
END;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE test_procedure_modify_insert_commit(tt_id i...
^
CALL test_procedure_modify_insert_commit(2,30);
ERROR: syntax error at or near "CALL"
LINE 1: CALL test_procedure_modify_insert_commit(2,30);
^
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
----+--------
1 | 1
(1 row)
-- delete is committed but insert is rolled back
CREATE PROCEDURE test_procedure_rollback(tt_id int, tt_org_id int) LANGUAGE PLPGSQL AS $$
BEGIN
DELETE FROM test_table;
COMMIT;
INSERT INTO test_table VALUES (tt_id, tt_org_id);
ROLLBACK;
END;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE test_procedure_rollback(tt_id int, tt_org_i...
^
CALL test_procedure_rollback(2,5);
ERROR: syntax error at or near "CALL"
LINE 1: CALL test_procedure_rollback(2,5);
^
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
----+--------
1 | 1
(1 row)
-- rollback is successful when the insert is on multiple rows
CREATE PROCEDURE test_procedure_rollback_2(tt_id int, tt_org_id int) LANGUAGE PLPGSQL AS $$
BEGIN
DELETE FROM test_table;
COMMIT;
INSERT INTO test_table VALUES (tt_id, tt_org_id), (tt_id+1, tt_org_id+1);
ROLLBACK;
END;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE test_procedure_rollback_2(tt_id int, tt_org...
^
CALL test_procedure_rollback_2(12, 15);
ERROR: syntax error at or near "CALL"
LINE 1: CALL test_procedure_rollback_2(12, 15);
^
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
----+--------
1 | 1
(1 row)
-- delete is rolled back, update is committed
CREATE PROCEDURE test_procedure_rollback_3(tt_id int, tt_org_id int) LANGUAGE PLPGSQL AS $$
BEGIN
DELETE FROM test_table;
ROLLBACK;
UPDATE test_table SET org_id = tt_org_id WHERE id = tt_id;
COMMIT;
END;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE PROCEDURE test_procedure_rollback_3(tt_id int, tt_org...
^
INSERT INTO test_table VALUES (1, 1), (2, 2);
ERROR: duplicate key value violates unique constraint "idx_table_100500"
DETAIL: Key (id, org_id)=(1, 1) already exists.
CONTEXT: while executing command on localhost:57638
CALL test_procedure_rollback_3(2,15);
ERROR: syntax error at or near "CALL"
LINE 1: CALL test_procedure_rollback_3(2,15);
^
SELECT * FROM test_table ORDER BY 1, 2;
id | org_id
----+--------
1 | 1
(1 row)
TRUNCATE test_table;
-- nested procedure calls should roll back normally
CREATE OR REPLACE PROCEDURE test_procedure_rollback(tt_id int, tt_org_id int) LANGUAGE PLPGSQL AS $$
BEGIN
INSERT INTO test_table VALUES (tt_id+12, tt_org_id+12);
ROLLBACK;
END;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE OR REPLACE PROCEDURE test_procedure_rollback(tt_id in...
^
CREATE OR REPLACE PROCEDURE test_procedure_rollback_2(tt_id int, tt_org_id int) LANGUAGE PLPGSQL AS $$
BEGIN
INSERT INTO test_table VALUES (tt_id+2, tt_org_id+1);
ROLLBACK;
END;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE OR REPLACE PROCEDURE test_procedure_rollback_2(tt_id ...
^
CREATE OR REPLACE PROCEDURE test_procedure(tt_id int, tt_org_id int) LANGUAGE PLPGSQL AS $$
BEGIN
CALL test_procedure_rollback(tt_id, tt_org_id);
CALL test_procedure_rollback_2(tt_id, tt_org_id);
INSERT INTO test_table VALUES (tt_id+100, tt_org_id+100);
ROLLBACK;
END;
$$;
ERROR: syntax error at or near "PROCEDURE"
LINE 1: CREATE OR REPLACE PROCEDURE test_procedure(tt_id int, tt_org...
^
SELECT * from test_table;
id | org_id
----+--------
(0 rows)
call test_procedure(1,1);
ERROR: syntax error at or near "call"
LINE 1: call test_procedure(1,1);
^
call test_procedure(20, 20);
ERROR: syntax error at or near "call"
LINE 1: call test_procedure(20, 20);
^
SELECT * from test_table;
id | org_id
----+--------
(0 rows)
\set VERBOSITY terse
DROP SCHEMA procedure_schema CASCADE;
NOTICE: drop cascades to table test_table
\set VERBOSITY default
RESET SEARCH_PATH;

View File

@ -1,13 +1,6 @@
-- ===================================================================
-- test top level window functions that are pushdownable
-- ===================================================================
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
t
(1 row)
-- a very simple window function with an aggregate and a window function
-- distribution column is on the partition by clause
SELECT
@ -55,9 +48,9 @@ FROM (
DISTINCT us.user_id, us.value_2, value_1, random() as r1
FROM
users_table as us, events_table
WHERE
WHERE
us.user_id = events_table.user_id AND event_type IN (1,2)
ORDER BY
ORDER BY
user_id, value_2
) s
GROUP BY
@ -100,7 +93,7 @@ ORDER BY
5 | 0
(32 rows)
-- window function operates on the results of
-- window function operates on the results of
-- a join
SELECT
us.user_id,
@ -134,7 +127,7 @@ FROM
JOIN
events_table ev
USING (user_id )
) j
) j
GROUP BY
user_id,
value_1
@ -204,15 +197,15 @@ ORDER BY
DROP VIEW users_view, window_view;
-- window function uses columns from two different tables
SELECT
SELECT
DISTINCT ON (events_table.user_id, rnk) events_table.user_id, rank() OVER my_win AS rnk
FROM
FROM
events_table, users_table
WHERE
WHERE
users_table.user_id = events_table.user_id
WINDOW
my_win AS (PARTITION BY events_table.user_id, users_table.value_1 ORDER BY events_table.time DESC)
ORDER BY
ORDER BY
rnk DESC, 1 DESC
LIMIT 10;
user_id | rnk
@ -230,15 +223,15 @@ LIMIT 10;
(10 rows)
-- the same query with reference table column is also on the partition by clause
SELECT
SELECT
DISTINCT ON (events_table.user_id, rnk) events_table.user_id, rank() OVER my_win AS rnk
FROM
FROM
events_table, users_ref_test_table uref
WHERE
WHERE
uref.id = events_table.user_id
WINDOW
my_win AS (PARTITION BY events_table.user_id, uref.k_no ORDER BY events_table.time DESC)
ORDER BY
ORDER BY
rnk DESC, 1 DESC
LIMIT 10;
user_id | rnk
@ -257,15 +250,15 @@ LIMIT 10;
-- a similar query with no distribution column in the partition by clause
-- is not supported
SELECT
SELECT
DISTINCT ON (events_table.user_id, rnk) events_table.user_id, rank() OVER my_win AS rnk
FROM
FROM
events_table, users_ref_test_table uref
WHERE
WHERE
uref.id = events_table.user_id
WINDOW
my_win AS (PARTITION BY events_table.value_2, uref.k_no ORDER BY events_table.time DESC)
ORDER BY
ORDER BY
rnk DESC, 1 DESC
LIMIT 10;
ERROR: could not run distributed query because the window function that is used cannot be pushed down
@ -301,7 +294,7 @@ ORDER BY
SELECT
COUNT(*) OVER (PARTITION BY user_id, user_id + 1),
rank() OVER (PARTITION BY user_id) as cnt1,
COUNT(*) OVER (PARTITION BY user_id, abs(value_1 - value_2)) as cnt2,
COUNT(*) OVER (PARTITION BY user_id, abs(value_1 - value_2)) as cnt2,
date_trunc('min', lag(time) OVER (PARTITION BY user_id ORDER BY time)) as datee,
rank() OVER my_win as rnnk,
avg(CASE
@ -331,7 +324,7 @@ LIMIT 5;
-- some tests with GROUP BY along with PARTITION BY
SELECT
user_id,
user_id,
rank() OVER my_win as my_rank,
avg(avg(event_type)) OVER my_win_2 as avg,
max(time) as mx_time
@ -664,17 +657,17 @@ ORDER BY
(66 rows)
-- some tests with GROUP BY, HAVING and LIMIT
SELECT
SELECT
user_id, sum(event_type) OVER my_win , event_type
FROM
events_table
GROUP BY
user_id, event_type
HAVING count(*) > 2
HAVING count(*) > 2
WINDOW my_win AS (PARTITION BY user_id, max(event_type) ORDER BY count(*) DESC)
ORDER BY
ORDER BY
2 DESC, 3 DESC, 1 DESC
LIMIT
LIMIT
5;
user_id | sum | event_type
---------+-----+------------
@ -737,11 +730,10 @@ LIMIT
4 | 2
(2 rows)
-- not a meaningful query, with interesting syntax
SELECT
user_id,
AVG(avg(value_1)) OVER (PARTITION BY user_id, max(user_id), MIN(value_2)),
user_id,
AVG(avg(value_1)) OVER (PARTITION BY user_id, max(user_id), MIN(value_2)),
AVG(avg(user_id)) OVER (PARTITION BY user_id, min(user_id), AVG(value_1))
FROM
users_table
@ -762,8 +754,8 @@ ORDER BY
SELECT coordinator_plan($Q$
EXPLAIN (COSTS FALSE)
SELECT
user_id,
AVG(avg(value_1)) OVER (PARTITION BY user_id, max(user_id), MIN(value_2)),
user_id,
AVG(avg(value_1)) OVER (PARTITION BY user_id, max(user_id), MIN(value_2)),
AVG(avg(user_id)) OVER (PARTITION BY user_id, min(user_id), AVG(value_1))
FROM
users_table
@ -848,7 +840,7 @@ LIMIT 5;
4 | 17 | 3.5000000000000000
(5 rows)
-- rank and ordering in the reverse order
-- rank and ordering in the reverse order
SELECT
user_id,
avg(value_1),

View File

@ -1,933 +0,0 @@
-- ===================================================================
-- test top level window functions that are pushdownable
-- ===================================================================
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
version_above_ten
-------------------
f
(1 row)
-- a very simple window function with an aggregate and a window function
-- distribution column is on the partition by clause
SELECT
user_id, COUNT(*) OVER (PARTITION BY user_id),
rank() OVER (PARTITION BY user_id)
FROM
users_table
ORDER BY
1 DESC, 2 DESC, 3 DESC
LIMIT 5;
user_id | count | rank
---------+-------+------
6 | 10 | 1
6 | 10 | 1
6 | 10 | 1
6 | 10 | 1
6 | 10 | 1
(5 rows)
-- a more complicated window clause, including an aggregate
-- in both the window clause and the target entry
SELECT
user_id, avg(avg(value_3)) OVER (PARTITION BY user_id, MIN(value_2))
FROM
users_table
GROUP BY
1
ORDER BY
2 DESC NULLS LAST, 1 DESC;
user_id | avg
---------+------------------
2 | 3
4 | 2.82608695652174
3 | 2.70588235294118
6 | 2.6
1 | 2.57142857142857
5 | 2.46153846153846
(6 rows)
-- window clause operates on the results of a subquery
SELECT
user_id, max(value_1) OVER (PARTITION BY user_id, MIN(value_2))
FROM (
SELECT
DISTINCT us.user_id, us.value_2, value_1, random() as r1
FROM
users_table as us, events_table
WHERE
us.user_id = events_table.user_id AND event_type IN (1,2)
ORDER BY
user_id, value_2
) s
GROUP BY
1, value_1
ORDER BY
2 DESC, 1;
user_id | max
---------+-----
1 | 5
3 | 5
3 | 5
4 | 5
5 | 5
5 | 5
6 | 5
6 | 5
1 | 4
2 | 4
3 | 4
3 | 4
3 | 4
4 | 4
4 | 4
5 | 4
5 | 4
1 | 3
2 | 3
2 | 3
2 | 3
6 | 3
2 | 2
4 | 2
4 | 2
4 | 2
6 | 2
1 | 1
3 | 1
5 | 1
6 | 1
5 | 0
(32 rows)
-- window function operates on the results of
-- a join
SELECT
us.user_id,
SUM(us.value_1) OVER (PARTITION BY us.user_id)
FROM
users_table us
JOIN
events_table ev
ON (us.user_id = ev.user_id)
GROUP BY
1,
value_1
ORDER BY
1,
2
LIMIT 5;
user_id | sum
---------+-----
1 | 13
1 | 13
1 | 13
1 | 13
2 | 10
(5 rows)
-- the same query, but this time join with an alias
SELECT
user_id, value_1, SUM(j.value_1) OVER (PARTITION BY j.user_id)
FROM
(users_table us
JOIN
events_table ev
USING (user_id )
) j
GROUP BY
user_id,
value_1
ORDER BY
3 DESC, 2 DESC, 1 DESC
LIMIT 5;
user_id | value_1 | sum
---------+---------+-----
5 | 5 | 15
4 | 5 | 15
3 | 5 | 15
5 | 4 | 15
4 | 4 | 15
(5 rows)
-- querying views that have window functions should be ok
CREATE VIEW window_view AS
SELECT
DISTINCT user_id, rank() OVER (PARTITION BY user_id ORDER BY value_1)
FROM
users_table
GROUP BY
user_id, value_1
HAVING count(*) > 1;
-- Window function in View works
SELECT *
FROM
window_view
ORDER BY
2 DESC, 1
LIMIT 10;
user_id | rank
---------+------
5 | 6
2 | 5
4 | 5
5 | 5
2 | 4
3 | 4
4 | 4
5 | 4
6 | 4
2 | 3
(10 rows)
-- the other way around also should work fine
-- query a view using window functions
CREATE VIEW users_view AS SELECT * FROM users_table;
SELECT
DISTINCT user_id, rank() OVER (PARTITION BY user_id ORDER BY value_1)
FROM
users_view
GROUP BY
user_id, value_1
HAVING count(*) > 4
ORDER BY
2 DESC, 1;
user_id | rank
---------+------
4 | 2
5 | 2
2 | 1
3 | 1
4 | 1
5 | 1
(6 rows)
DROP VIEW users_view, window_view;
-- window function uses columns from two different tables
SELECT
DISTINCT ON (events_table.user_id, rnk) events_table.user_id, rank() OVER my_win AS rnk
FROM
events_table, users_table
WHERE
users_table.user_id = events_table.user_id
WINDOW
my_win AS (PARTITION BY events_table.user_id, users_table.value_1 ORDER BY events_table.time DESC)
ORDER BY
rnk DESC, 1 DESC
LIMIT 10;
user_id | rnk
---------+-----
3 | 121
5 | 118
2 | 116
3 | 115
4 | 113
2 | 111
5 | 109
3 | 109
4 | 106
2 | 106
(10 rows)
-- the same query with reference table column is also on the partition by clause
SELECT
DISTINCT ON (events_table.user_id, rnk) events_table.user_id, rank() OVER my_win AS rnk
FROM
events_table, users_ref_test_table uref
WHERE
uref.id = events_table.user_id
WINDOW
my_win AS (PARTITION BY events_table.user_id, uref.k_no ORDER BY events_table.time DESC)
ORDER BY
rnk DESC, 1 DESC
LIMIT 10;
user_id | rnk
---------+-----
2 | 24
2 | 23
2 | 22
3 | 21
2 | 21
3 | 20
2 | 20
3 | 19
2 | 19
3 | 18
(10 rows)
-- a similar query with no distribution column in the partition by clause
-- is not supported
SELECT
DISTINCT ON (events_table.user_id, rnk) events_table.user_id, rank() OVER my_win AS rnk
FROM
events_table, users_ref_test_table uref
WHERE
uref.id = events_table.user_id
WINDOW
my_win AS (PARTITION BY events_table.value_2, uref.k_no ORDER BY events_table.time DESC)
ORDER BY
rnk DESC, 1 DESC
LIMIT 10;
ERROR: could not run distributed query because the window function that is used cannot be pushed down
HINT: Window functions are supported in two ways. Either add an equality filter on the distributed tables' partition column or use the window functions with a PARTITION BY clause containing the distribution column
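To make the HINT concrete, the two supported shapes look roughly like this (a sketch against this test's events_table, which is distributed on user_id):
-- supported: the distribution column appears in PARTITION BY
SELECT user_id, rank() OVER (PARTITION BY user_id ORDER BY time) FROM events_table;
-- supported: an equality filter on the distribution column
SELECT value_2, rank() OVER (PARTITION BY value_2) FROM events_table WHERE user_id = 1;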
-- the ORDER BY in the window function is an aggregate
SELECT
user_id, rank() OVER my_win as rnk, avg(value_2) as avg_val_2
FROM
events_table
GROUP BY
user_id, date_trunc('day', time)
WINDOW
my_win AS (PARTITION BY user_id ORDER BY avg(event_type) DESC)
ORDER BY
3 DESC, 2 DESC, 1 DESC;
user_id | rnk | avg_val_2
---------+-----+--------------------
1 | 1 | 3.3750000000000000
3 | 2 | 3.1666666666666667
5 | 1 | 2.6666666666666667
6 | 1 | 2.5000000000000000
4 | 1 | 2.5000000000000000
2 | 1 | 2.4736842105263158
4 | 2 | 2.4000000000000000
1 | 2 | 2.1428571428571429
5 | 2 | 2.0909090909090909
6 | 2 | 2.0000000000000000
2 | 2 | 2.0000000000000000
3 | 1 | 1.8000000000000000
(12 rows)
-- let's push the limits of writing complex expressions along with the window functions
SELECT
COUNT(*) OVER (PARTITION BY user_id, user_id + 1),
rank() OVER (PARTITION BY user_id) as cnt1,
COUNT(*) OVER (PARTITION BY user_id, abs(value_1 - value_2)) as cnt2,
date_trunc('min', lag(time) OVER (PARTITION BY user_id ORDER BY time)) as datee,
rank() OVER my_win as rnnk,
avg(CASE
WHEN user_id > 4
THEN value_1
ELSE value_2
END) FILTER (WHERE user_id > 2) OVER my_win_2 as filtered_count,
sum(user_id * (5.0 / (value_1 + value_2 + 0.1)) * value_3) FILTER (WHERE value_1::text LIKE '%1%') OVER my_win_4 as cnt_with_filter_2
FROM
users_table
WINDOW
my_win AS (PARTITION BY user_id, (value_1%3)::int ORDER BY time DESC),
my_win_2 AS (PARTITION BY user_id, (value_1)::int ORDER BY time DESC),
my_win_3 AS (PARTITION BY user_id, date_trunc('min', time)),
my_win_4 AS (my_win_3 ORDER BY value_2, value_3)
ORDER BY
cnt_with_filter_2 DESC NULLS LAST, filtered_count DESC NULLS LAST, datee DESC NULLS LAST, rnnk DESC, cnt2 DESC, cnt1 DESC, user_id DESC
LIMIT 5;
count | cnt1 | cnt2 | datee | rnnk | filtered_count | cnt_with_filter_2
-------+------+------+--------------------------+------+------------------------+-------------------
23 | 1 | 7 | Thu Nov 23 02:14:00 2017 | 6 | 0.00000000000000000000 | 72.7272727272727
10 | 1 | 3 | Wed Nov 22 23:01:00 2017 | 1 | 1.00000000000000000000 | 57.1428571428571
17 | 1 | 5 | Wed Nov 22 23:24:00 2017 | 8 | 3.0000000000000000 | 28.5714285714286
17 | 1 | 5 | | 10 | 2.6666666666666667 | 28.5714285714286
17 | 1 | 5 | Thu Nov 23 00:15:00 2017 | 7 | 3.6666666666666667 | 24.1935483870968
(5 rows)
-- some tests with GROUP BY along with PARTITION BY
SELECT
user_id,
rank() OVER my_win as my_rank,
avg(avg(event_type)) OVER my_win_2 as avg,
max(time) as mx_time
FROM
events_table
GROUP BY
user_id,
value_2
WINDOW
my_win AS (PARTITION BY user_id, max(event_type) ORDER BY count(*) DESC),
my_win_2 AS (PARTITION BY user_id, avg(user_id) ORDER BY count(*) DESC)
ORDER BY
avg DESC,
mx_time DESC,
my_rank DESC,
user_id DESC;
user_id | my_rank | avg | mx_time
---------+---------+------------------------+---------------------------------
6 | 1 | 3.0000000000000000 | Thu Nov 23 14:00:13.20013 2017
6 | 2 | 3.0000000000000000 | Thu Nov 23 11:16:13.106691 2017
6 | 1 | 3.0000000000000000 | Thu Nov 23 07:27:32.822068 2017
3 | 1 | 2.9857142857142857 | Thu Nov 23 16:31:56.219594 2017
4 | 2 | 2.9555555555555556 | Thu Nov 23 14:19:25.765876 2017
4 | 1 | 2.9555555555555556 | Thu Nov 23 08:36:53.871919 2017
1 | 4 | 2.8633333333333333 | Wed Nov 22 21:06:57.457147 2017
1 | 1 | 2.8250000000000000 | Thu Nov 23 21:54:46.924477 2017
2 | 2 | 2.7738095238095238 | Thu Nov 23 13:27:37.441959 2017
1 | 2 | 2.7722222222222222 | Thu Nov 23 09:23:30.994345 2017
3 | 1 | 2.7682539682539682 | Thu Nov 23 01:17:49.040685 2017
2 | 1 | 2.7142857142857143 | Thu Nov 23 15:58:49.273421 2017
1 | 3 | 2.5791666666666667 | Thu Nov 23 11:09:38.074595 2017
3 | 1 | 2.5714285714285714 | Thu Nov 23 16:44:41.903713 2017
2 | 1 | 2.5158730158730159 | Thu Nov 23 14:02:47.738901 2017
4 | 1 | 2.47777777777777778333 | Thu Nov 23 16:20:33.264457 2017
4 | 3 | 2.47777777777777778333 | Thu Nov 23 08:14:18.231273 2017
4 | 3 | 2.47777777777777778333 | Thu Nov 23 07:32:45.521278 2017
1 | 1 | 2.4000000000000000 | Thu Nov 23 10:23:27.617726 2017
2 | 1 | 2.3869047619047619 | Thu Nov 23 17:26:14.563216 2017
3 | 1 | 2.3841269841269841 | Thu Nov 23 18:08:26.550729 2017
3 | 1 | 2.3841269841269841 | Thu Nov 23 09:38:45.338008 2017
3 | 2 | 2.3841269841269841 | Thu Nov 23 06:44:50.887182 2017
2 | 2 | 2.3095238095238095 | Thu Nov 23 04:05:16.217731 2017
5 | 2 | 2.3000000000000000 | Thu Nov 23 14:28:51.833214 2017
5 | 2 | 2.3000000000000000 | Thu Nov 23 14:23:09.889786 2017
4 | 1 | 2.2000000000000000 | Thu Nov 23 18:10:21.338399 2017
2 | 1 | 2.09126984126984126667 | Thu Nov 23 03:35:04.321504 2017
5 | 1 | 2.0000000000000000 | Thu Nov 23 16:11:02.929469 2017
5 | 1 | 2.0000000000000000 | Thu Nov 23 14:40:40.467511 2017
5 | 1 | 2.0000000000000000 | Thu Nov 23 13:26:45.571108 2017
(31 rows)
-- test for range and rows mode and different window functions
-- mostly to make sure that deparsing works fine
SELECT
user_id,
rank() OVER (PARTITION BY user_id ROWS BETWEEN
UNBOUNDED PRECEDING AND CURRENT ROW),
dense_rank() OVER (PARTITION BY user_id RANGE BETWEEN
UNBOUNDED PRECEDING AND CURRENT ROW),
CUME_DIST() OVER (PARTITION BY user_id RANGE BETWEEN
UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING),
PERCENT_RANK() OVER (PARTITION BY user_id ORDER BY avg(value_1) RANGE BETWEEN
UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
FROM
users_table
GROUP BY
1
ORDER BY
4 DESC,3 DESC,2 DESC ,1 DESC;
user_id | rank | dense_rank | cume_dist | percent_rank
---------+------+------------+-----------+--------------
6 | 1 | 1 | 1 | 0
5 | 1 | 1 | 1 | 0
4 | 1 | 1 | 1 | 0
3 | 1 | 1 | 1 | 0
2 | 1 | 1 | 1 | 0
1 | 1 | 1 | 1 | 0
(6 rows)
-- test exclude supported
SELECT
user_id,
value_1,
array_agg(value_1) OVER (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),
array_agg(value_1) OVER (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE CURRENT ROW)
FROM
users_table
WHERE
user_id > 2 AND user_id < 6
ORDER BY
user_id, value_1, 3, 4;
ERROR: syntax error at or near "EXCLUDE"
LINE 5: ...ANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE CU...
^
-- test <offset> preceding and <offset> following on RANGE window
SELECT
user_id,
value_1,
array_agg(value_1) OVER range_window,
array_agg(value_1) OVER range_window_exclude
FROM
users_table
WHERE
user_id > 2 AND user_id < 6
WINDOW
range_window as (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING),
range_window_exclude as (PARTITION BY user_id ORDER BY value_1 RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW)
ORDER BY
user_id, value_1, 3, 4;
ERROR: RANGE PRECEDING is only supported with UNBOUNDED
LINE 11: ..._window as (PARTITION BY user_id ORDER BY value_1 RANGE BETW...
^
-- test <offset> preceding and <offset> following on ROW window
SELECT
user_id,
value_1,
array_agg(value_1) OVER row_window,
array_agg(value_1) OVER row_window_exclude
FROM
users_table
WHERE
user_id > 2 and user_id < 6
WINDOW
row_window as (PARTITION BY user_id ORDER BY value_1 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING),
row_window_exclude as (PARTITION BY user_id ORDER BY value_1 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW)
ORDER BY
user_id, value_1, 3, 4;
ERROR: syntax error at or near "EXCLUDE"
LINE 12: ...value_1 ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CU...
^
-- some tests with GROUP BY, HAVING and LIMIT
SELECT
user_id, sum(event_type) OVER my_win , event_type
FROM
events_table
GROUP BY
user_id, event_type
HAVING count(*) > 2
WINDOW my_win AS (PARTITION BY user_id, max(event_type) ORDER BY count(*) DESC)
ORDER BY
2 DESC, 3 DESC, 1 DESC
LIMIT
5;
user_id | sum | event_type
---------+-----+------------
4 | 4 | 4
3 | 4 | 4
2 | 4 | 4
1 | 4 | 4
5 | 3 | 3
(5 rows)
-- Group by has more columns than partition by
SELECT
DISTINCT user_id, SUM(value_2) OVER (PARTITION BY user_id)
FROM
users_table
GROUP BY
user_id, value_1, value_2
HAVING count(*) > 2
ORDER BY
2 DESC, 1
LIMIT
10;
user_id | sum
---------+-----
5 | 3
4 | 2
(2 rows)
SELECT
DISTINCT ON (user_id) user_id, SUM(value_2) OVER (PARTITION BY user_id)
FROM
users_table
GROUP BY
user_id, value_1, value_2
HAVING count(*) > 2
ORDER BY
1, 2 DESC
LIMIT
10;
user_id | sum
---------+-----
4 | 2
5 | 3
(2 rows)
SELECT
DISTINCT ON (SUM(value_1) OVER (PARTITION BY user_id)) user_id, SUM(value_2) OVER (PARTITION BY user_id)
FROM
users_table
GROUP BY
user_id, value_1, value_2
HAVING count(*) > 2
ORDER BY
(SUM(value_1) OVER (PARTITION BY user_id)) , 2 DESC, 1
LIMIT
10;
user_id | sum
---------+-----
5 | 3
4 | 2
(2 rows)
-- not a meaningful query, with interesting syntax
SELECT
user_id,
AVG(avg(value_1)) OVER (PARTITION BY user_id, max(user_id), MIN(value_2)),
AVG(avg(user_id)) OVER (PARTITION BY user_id, min(user_id), AVG(value_1))
FROM
users_table
GROUP BY
1
ORDER BY
3 DESC, 2 DESC, 1 DESC;
user_id | avg | avg
---------+--------------------+------------------------
6 | 2.1000000000000000 | 6.0000000000000000
5 | 2.6538461538461538 | 5.0000000000000000
4 | 2.7391304347826087 | 4.0000000000000000
3 | 2.3529411764705882 | 3.0000000000000000
2 | 2.3333333333333333 | 2.0000000000000000
1 | 3.2857142857142857 | 1.00000000000000000000
(6 rows)
SELECT coordinator_plan($Q$
EXPLAIN (COSTS FALSE)
SELECT
user_id,
AVG(avg(value_1)) OVER (PARTITION BY user_id, max(user_id), MIN(value_2)),
AVG(avg(user_id)) OVER (PARTITION BY user_id, min(user_id), AVG(value_1))
FROM
users_table
GROUP BY
1
ORDER BY
3 DESC, 2 DESC, 1 DESC;
$Q$);
coordinator_plan
------------------------------------------------------------------------------------
Sort
Sort Key: remote_scan.avg_1 DESC, remote_scan.avg DESC, remote_scan.user_id DESC
-> HashAggregate
Group Key: remote_scan.user_id
-> Custom Scan (Citus Adaptive)
Task Count: 4
(6 rows)
SELECT
user_id,
1 + sum(value_1),
1 + AVG(value_2) OVER (partition by user_id)
FROM
users_table
GROUP BY
user_id, value_2
ORDER BY
user_id, value_2;
user_id | ?column? | ?column?
---------+----------+--------------------
1 | 5 | 3.2500000000000000
1 | 4 | 3.2500000000000000
1 | 6 | 3.2500000000000000
1 | 12 | 3.2500000000000000
2 | 3 | 3.5000000000000000
2 | 5 | 3.5000000000000000
2 | 13 | 3.5000000000000000
2 | 6 | 3.5000000000000000
2 | 17 | 3.5000000000000000
2 | 4 | 3.5000000000000000
3 | 3 | 4.0000000000000000
3 | 13 | 4.0000000000000000
3 | 10 | 4.0000000000000000
3 | 2 | 4.0000000000000000
3 | 17 | 4.0000000000000000
4 | 4 | 3.5000000000000000
4 | 28 | 3.5000000000000000
4 | 1 | 3.5000000000000000
4 | 11 | 3.5000000000000000
4 | 17 | 3.5000000000000000
4 | 8 | 3.5000000000000000
5 | 7 | 3.5000000000000000
5 | 17 | 3.5000000000000000
5 | 24 | 3.5000000000000000
5 | 9 | 3.5000000000000000
5 | 8 | 3.5000000000000000
5 | 10 | 3.5000000000000000
6 | 6 | 3.0000000000000000
6 | 3 | 3.0000000000000000
6 | 9 | 3.0000000000000000
6 | 3 | 3.0000000000000000
6 | 5 | 3.0000000000000000
(32 rows)
SELECT
user_id,
1 + sum(value_1),
1 + AVG(value_2) OVER (partition by user_id)
FROM
users_table
GROUP BY
user_id, value_2
ORDER BY
2 DESC, 1
LIMIT 5;
user_id | ?column? | ?column?
---------+----------+--------------------
4 | 28 | 3.5000000000000000
5 | 24 | 3.5000000000000000
2 | 17 | 3.5000000000000000
3 | 17 | 4.0000000000000000
4 | 17 | 3.5000000000000000
(5 rows)
-- rank and ordering in the reverse order
SELECT
user_id,
avg(value_1),
RANK() OVER (partition by user_id order by value_2)
FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, value_2 DESC;
user_id | avg | rank
---------+------------------------+------
1 | 3.6666666666666667 | 4
1 | 2.5000000000000000 | 3
1 | 3.0000000000000000 | 2
1 | 4.0000000000000000 | 1
2 | 1.5000000000000000 | 6
2 | 3.2000000000000000 | 5
2 | 1.6666666666666667 | 4
2 | 3.0000000000000000 | 3
2 | 1.3333333333333333 | 2
2 | 2.0000000000000000 | 1
3 | 2.6666666666666667 | 5
3 | 1.00000000000000000000 | 4
3 | 3.0000000000000000 | 3
3 | 2.4000000000000000 | 2
3 | 1.00000000000000000000 | 1
4 | 3.5000000000000000 | 6
4 | 3.2000000000000000 | 5
4 | 3.3333333333333333 | 4
4 | 0.00000000000000000000 | 3
4 | 3.0000000000000000 | 2
4 | 1.00000000000000000000 | 1
5 | 3.0000000000000000 | 6
5 | 2.3333333333333333 | 5
5 | 1.6000000000000000 | 4
5 | 2.8750000000000000 | 3
5 | 3.2000000000000000 | 2
5 | 3.0000000000000000 | 1
6 | 1.3333333333333333 | 5
6 | 2.0000000000000000 | 4
6 | 4.0000000000000000 | 3
6 | 1.00000000000000000000 | 2
6 | 2.5000000000000000 | 1
(32 rows)
-- order by in the window function is the same as avg(value_1) DESC
SELECT
user_id,
avg(value_1),
RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1)))
FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC;
user_id | avg | rank
---------+------------------------+------
1 | 4.0000000000000000 | 1
1 | 3.6666666666666667 | 2
1 | 3.0000000000000000 | 3
1 | 2.5000000000000000 | 4
2 | 3.2000000000000000 | 1
2 | 3.0000000000000000 | 2
2 | 2.0000000000000000 | 3
2 | 1.6666666666666667 | 4
2 | 1.5000000000000000 | 5
2 | 1.3333333333333333 | 6
3 | 3.0000000000000000 | 1
3 | 2.6666666666666667 | 2
3 | 2.4000000000000000 | 3
3 | 1.00000000000000000000 | 4
3 | 1.00000000000000000000 | 4
4 | 3.5000000000000000 | 1
4 | 3.3333333333333333 | 2
4 | 3.2000000000000000 | 3
4 | 3.0000000000000000 | 4
4 | 1.00000000000000000000 | 5
4 | 0.00000000000000000000 | 6
5 | 3.2000000000000000 | 1
5 | 3.0000000000000000 | 2
5 | 3.0000000000000000 | 2
5 | 2.8750000000000000 | 4
5 | 2.3333333333333333 | 5
5 | 1.6000000000000000 | 6
6 | 4.0000000000000000 | 1
6 | 2.5000000000000000 | 2
6 | 2.0000000000000000 | 3
6 | 1.3333333333333333 | 4
6 | 1.00000000000000000000 | 5
(32 rows)
EXPLAIN (COSTS FALSE)
SELECT
user_id,
avg(value_1),
RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1)))
FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: remote_scan.user_id, (pg_catalog.sum(((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1)))) / pg_catalog.sum(remote_scan.rank)) DESC
-> HashAggregate
Group Key: remote_scan.user_id, remote_scan.worker_column_5
-> Custom Scan (Citus Real-Time)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> WindowAgg
-> Sort
Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1))))
-> HashAggregate
Group Key: users_table.user_id, users_table.value_2
-> Seq Scan on users_table_1400256 users_table
(15 rows)
-- order by in the window function is the same as avg(value_1) DESC
SELECT
user_id,
avg(value_1),
RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1)))
FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC;
user_id | avg | rank
---------+------------------------+------
1 | 4.0000000000000000 | 1
1 | 3.6666666666666667 | 2
1 | 3.0000000000000000 | 3
1 | 2.5000000000000000 | 4
2 | 3.2000000000000000 | 1
2 | 3.0000000000000000 | 2
2 | 2.0000000000000000 | 3
2 | 1.6666666666666667 | 4
2 | 1.5000000000000000 | 5
2 | 1.3333333333333333 | 6
3 | 3.0000000000000000 | 1
3 | 2.6666666666666667 | 2
3 | 2.4000000000000000 | 3
3 | 1.00000000000000000000 | 4
3 | 1.00000000000000000000 | 4
4 | 3.5000000000000000 | 1
4 | 3.3333333333333333 | 2
4 | 3.2000000000000000 | 3
4 | 3.0000000000000000 | 4
4 | 1.00000000000000000000 | 5
4 | 0.00000000000000000000 | 6
5 | 3.2000000000000000 | 1
5 | 3.0000000000000000 | 2
5 | 3.0000000000000000 | 2
5 | 2.8750000000000000 | 4
5 | 2.3333333333333333 | 5
5 | 1.6000000000000000 | 6
6 | 4.0000000000000000 | 1
6 | 2.5000000000000000 | 2
6 | 2.0000000000000000 | 3
6 | 1.3333333333333333 | 4
6 | 1.00000000000000000000 | 5
(32 rows)
-- limit is not pushed down to the worker!!
EXPLAIN (COSTS FALSE)
SELECT
user_id,
avg(value_1),
RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1)))
FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.user_id, (pg_catalog.sum(((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1)))) / pg_catalog.sum(remote_scan.rank)) DESC
-> HashAggregate
Group Key: remote_scan.user_id, remote_scan.worker_column_5
-> Custom Scan (Citus Real-Time)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> WindowAgg
-> Sort
Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1))))
-> HashAggregate
Group Key: users_table.user_id, users_table.value_2
-> Seq Scan on users_table_1400256 users_table
(16 rows)
EXPLAIN (COSTS FALSE)
SELECT
user_id,
avg(value_1),
RANK() OVER (partition by user_id order by 1 / (1 + avg(value_1)))
FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.user_id, (pg_catalog.sum(((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1)))) / pg_catalog.sum(remote_scan.rank)) DESC
-> HashAggregate
Group Key: remote_scan.user_id, remote_scan.worker_column_5
-> Custom Scan (Citus Real-Time)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> WindowAgg
-> Sort
Sort Key: users_table.user_id, (('1'::numeric / ('1'::numeric + avg(users_table.value_1))))
-> HashAggregate
Group Key: users_table.user_id, users_table.value_2
-> Seq Scan on users_table_1400256 users_table
(16 rows)
EXPLAIN (COSTS FALSE)
SELECT
user_id,
avg(value_1),
RANK() OVER (partition by user_id order by 1 / (1 + sum(value_2)))
FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.user_id, (pg_catalog.sum(((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1)))) / pg_catalog.sum(remote_scan.rank)) DESC
-> HashAggregate
Group Key: remote_scan.user_id, remote_scan.worker_column_5
-> Custom Scan (Citus Real-Time)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> WindowAgg
-> Sort
Sort Key: users_table.user_id, ((1 / (1 + sum(users_table.value_2))))
-> HashAggregate
Group Key: users_table.user_id, users_table.value_2
-> Seq Scan on users_table_1400256 users_table
(16 rows)
EXPLAIN (COSTS FALSE)
SELECT
user_id,
avg(value_1),
RANK() OVER (partition by user_id order by sum(value_2))
FROM
users_table
GROUP BY user_id, value_2
ORDER BY user_id, avg(value_1) DESC
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.user_id, (pg_catalog.sum(((pg_catalog.sum(remote_scan.avg) / pg_catalog.sum(remote_scan.avg_1)))) / pg_catalog.sum(remote_scan.rank)) DESC
-> HashAggregate
Group Key: remote_scan.user_id, remote_scan.worker_column_5
-> Custom Scan (Citus Real-Time)
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=57637 dbname=regression
-> WindowAgg
-> Sort
Sort Key: users_table.user_id, (sum(users_table.value_2))
-> HashAggregate
Group Key: users_table.user_id, users_table.value_2
-> Seq Scan on users_table_1400256 users_table
(16 rows)

View File

@ -1,11 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 11 AS server_verion_eleven_and_above
\gset
\if :server_verion_eleven_and_above
\else
\q
\endif
SET citus.next_shard_id TO 20030000;
CREATE USER procedureuser;
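The guard removed above is the standard psql pattern for skipping an entire test file on older servers; with PG 11 now the minimum, it is no longer needed here. If a future test had to gate on PG 12, the same pattern would apply (hypothetical sketch, not part of this commit):
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 12 AS server_version_twelve_and_above
\gset
\if :server_version_twelve_and_above
\else
\q
\endif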

View File

@ -2,10 +2,6 @@
-- we don't mark transactions with ANALYZE as critical anymore, and
-- get WARNINGs instead of ERRORs.
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
SET citus.next_shard_id TO 12000000;
SELECT citus.mitmproxy('conn.allow()');
@ -30,7 +26,7 @@ ANALYZE vacuum_test;
-- ANALYZE transactions being critical is an open question, see #2430
-- show that we marked as INVALID on COMMIT FAILURE
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass);
UPDATE pg_dist_shard_placement SET shardstate = 1

View File

@ -1,10 +1,8 @@
--
--
-- Tests multiple commands in transactions where
-- there is foreign key relation between reference
-- tables and distributed tables
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
CREATE SCHEMA test_fkey_to_ref_in_tx;
SET search_path TO 'test_fkey_to_ref_in_tx';
@ -50,7 +48,7 @@ BEGIN;
ROLLBACK;
-- case 1.2: SELECT to a reference table is followed by a multiple router SELECTs to a distributed table
BEGIN;
BEGIN;
SELECT count(*) FROM reference_table;
SELECT count(*) FROM on_update_fkey_table WHERE id = 15;
SELECT count(*) FROM on_update_fkey_table WHERE id = 16;
@ -58,7 +56,7 @@ BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE id = 18;
ROLLBACK;
BEGIN;
BEGIN;
SELECT count(*) FROM transitive_reference_table;
SELECT count(*) FROM on_update_fkey_table WHERE id = 15;
SELECT count(*) FROM on_update_fkey_table WHERE id = 16;
@ -95,28 +93,28 @@ BEGIN;
ROLLBACK;
-- case 1.5: SELECT to a reference table is followed by a DDL that touches fkey column
BEGIN;
BEGIN;
SELECT count(*) FROM reference_table;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
ROLLBACK;
BEGIN;
BEGIN;
SELECT count(*) FROM transitive_reference_table;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
ROLLBACK;
-- case 1.6: SELECT to a reference table is followed by an unrelated DDL
BEGIN;
BEGIN;
SELECT count(*) FROM reference_table;
ALTER TABLE on_update_fkey_table ADD COLUMN X INT;
ROLLBACK;
BEGIN;
BEGIN;
SELECT count(*) FROM transitive_reference_table;
ALTER TABLE on_update_fkey_table ADD COLUMN X INT;
ROLLBACK;
-- case 1.7.1: SELECT to a reference table is followed by a DDL that is on
-- case 1.7.1: SELECT to a reference table is followed by a DDL that is on
-- the foreign key column
BEGIN;
SELECT count(*) FROM reference_table;
@ -134,31 +132,31 @@ BEGIN;
ALTER TABLE on_update_fkey_table DROP COLUMN value_1 CASCADE;
ROLLBACK;
-- case 1.7.2: SELECT to a reference table is followed by a DDL that is on
-- case 1.7.2: SELECT to a reference table is followed by a DDL that is on
-- the foreign key column after a parallel query has been executed
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
SELECT count(*) FROM reference_table;
ALTER TABLE on_update_fkey_table DROP COLUMN value_1 CASCADE;
ROLLBACK;
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
SELECT count(*) FROM transitive_reference_table;
ALTER TABLE on_update_fkey_table DROP COLUMN value_1 CASCADE;
ROLLBACK;
-- case 1.7.3: SELECT to a reference table is followed by a DDL that is not on
-- the foreign key column, and a parallel query has already been executed
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
SELECT count(*) FROM reference_table;
ALTER TABLE on_update_fkey_table ADD COLUMN X INT;
ROLLBACK;
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
SELECT count(*) FROM transitive_reference_table;
ALTER TABLE on_update_fkey_table ADD COLUMN X INT;
@ -629,7 +627,7 @@ ROLLBACK;
-- an unrelated update followed by update on the reference table and update
-- on the cascading distributed table
-- note that the UPDATE on the reference table will try to set the execution
-- mode to sequential, which will fail since there are already opened
-- parallel connections
BEGIN;
UPDATE unrelated_dist_table SET value_1 = 15;
@ -653,7 +651,7 @@ ROLLBACK;
-- already executed a parallel query
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
SELECT create_distributed_table('tt4', 'id');
@ -671,7 +669,7 @@ ROLLBACK;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
SELECT create_distributed_table('tt4', 'id');
@ -688,7 +686,7 @@ ROLLBACK;
-- parallel connection via create_distributed_table(), later
-- adding foreign key to reference table fails
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
@ -704,7 +702,7 @@ COMMIT;
-- same test with the above on sequential mode should work fine
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
@ -721,7 +719,7 @@ BEGIN;
COMMIT;
-- similar test with the above, but this time the order of
-- create_distributed_table and create_reference_table is
-- changed
BEGIN;
@ -778,7 +776,7 @@ ROLLBACK;
-- make sure that we cannot create hash distributed tables with
-- foreign keys to reference tables when they have data in it
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
@ -797,7 +795,7 @@ COMMIT;
-- the same test as above in sequential mode would still not work
-- since COPY cannot be executed in sequential mode
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
@ -808,16 +806,16 @@ BEGIN;
SELECT create_reference_table('test_table_1');
SELECT create_distributed_table('test_table_2', 'id');
-- make sure that the output isn't too verbose
SET LOCAL client_min_messages TO ERROR;
DROP TABLE test_table_2, test_table_1;
COMMIT;
-- we should be able to execute DML/DDL/SELECT after we've
-- switched to sequential via create_distributed_table
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
@ -848,11 +846,11 @@ SELECT create_reference_table('reference_table');
CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('distributed_table', 'id');
ALTER TABLE
distributed_table
ADD CONSTRAINT
fkey_delete FOREIGN KEY(value_1)
REFERENCES
reference_table(id) ON DELETE CASCADE;
INSERT INTO reference_table SELECT i FROM generate_series(0, 10) i;
@ -860,7 +858,7 @@ INSERT INTO distributed_table SELECT i, i % 10 FROM generate_series(0, 100) i;
-- this query returns 100 rows in Postgres, but not in Citus
-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
WITH t1 AS (DELETE FROM reference_table RETURNING id)
DELETE FROM distributed_table USING t1 WHERE value_1 = t1.id RETURNING *;
-- load some more data for one more test with real-time selects
@ -869,10 +867,10 @@ INSERT INTO distributed_table SELECT i, i % 10 FROM generate_series(0, 100) i;
-- this query returns 100 rows in Postgres, but not in Citus
-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
WITH t1 AS (DELETE FROM reference_table RETURNING id)
SELECT count(*) FROM distributed_table, t1 WHERE value_1 = t1.id;
-- this query should fail since we first do a parallel access to a distributed table
-- with t1, and then access to t2
WITH t1 AS (DELETE FROM distributed_table RETURNING id),
t2 AS (DELETE FROM reference_table RETURNING id)
@ -887,7 +885,7 @@ WITH t1 AS (DELETE FROM distributed_table RETURNING id)
-- finally, make sure that we can execute the same queries
-- in the sequential mode
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
WITH t1 AS (DELETE FROM distributed_table RETURNING id),
@ -896,7 +894,7 @@ BEGIN;
ROLLBACK;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
WITH t1 AS (DELETE FROM distributed_table RETURNING id)

View File

@ -1,8 +1,6 @@
--
-- FOREIGN_KEY_TO_REFERENCE_TABLE
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
CREATE SCHEMA fkey_reference_table;
SET search_path TO 'fkey_reference_table';
@ -13,12 +11,12 @@ SET citus.next_placement_id TO 7000000;
CREATE TYPE foreign_details AS (name text, relid text, refd_relid text);
CREATE VIEW table_fkeys_in_workers AS
SELECT
(json_populate_record(NULL::foreign_details,
json_array_elements_text((run_command_on_workers( $$
SELECT
COALESCE(json_agg(row_to_json(d)), '[]'::json)
FROM
(
SELECT
@ -175,7 +173,7 @@ COMMIT;
SELECT * FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%' ORDER BY 1,2,3;
DROP TABLE referencing_table;
-- foreign keys are supported either between distributed tables including the
-- distribution column or from distributed tables to reference tables.
CREATE TABLE referencing_table(id int, ref_id int);
SELECT create_distributed_table('referencing_table', 'ref_id', 'append');
@ -271,7 +269,7 @@ DROP TABLE referencing_table;
DROP TABLE referenced_table;
-- foreign key as composite key
CREATE TYPE fkey_reference_table.composite AS (key1 int, key2 int);
CREATE TABLE referenced_table(test_column composite, PRIMARY KEY(test_column));
CREATE TABLE referencing_table(id int, referencing_composite composite);
@ -289,7 +287,7 @@ DROP TABLE referenced_table CASCADE;
DROP TABLE referencing_table CASCADE;
-- In the following test, we'll use a SERIAL column as the referenced column
-- in the foreign constraint. We'll first show that an insert on a non-serial
-- column successfully inserts into the serial and referenced column.
-- Accordingly, the inserts into the referencing table which reference the
-- serial column will be successful.
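The DDL for this test sits outside the hunk; a minimal hedged sketch of the shape the comment describes, with hypothetical constraint and value choices, following the patterns used elsewhere in this file:
CREATE TABLE referenced_table(test_column SERIAL PRIMARY KEY, test_column2 int);
CREATE TABLE referencing_table(id int, ref_id int);
SELECT create_reference_table('referenced_table');
SELECT create_distributed_table('referencing_table', 'id');
ALTER TABLE referencing_table ADD CONSTRAINT fkey_to_serial
FOREIGN KEY (ref_id) REFERENCES referenced_table(test_column);
-- an insert through the non-serial column generates the serial key...
INSERT INTO referenced_table(test_column2) VALUES (10);
-- ...so the generated value (1) can then be referenced:
INSERT INTO referencing_table VALUES (1, 1);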
@ -309,9 +307,9 @@ DROP TABLE referenced_table CASCADE;
DROP TABLE referencing_table CASCADE;
-- In the following test, we'll use a SERIAL column as the referencing column
-- in the foreign constraint. We'll first show that the values that exist
-- in the referenced tables are successfully generated by the serial column
-- and inserted into the distributed table. However, if the values that are generated
-- by the serial column do not exist on the referenced table, the query fails.
CREATE TABLE referenced_table(test_column int PRIMARY KEY, test_column2 int);
CREATE TABLE referencing_table(id int, ref_id SERIAL);
@ -329,10 +327,10 @@ DROP TABLE referenced_table CASCADE;
DROP TABLE referencing_table CASCADE;
-- In the following test, we'll use a SERIAL column as the referencing
-- and referenced columns in a foreign constraint. We'll first show that
-- the inserts into the referenced column will successfully generate and insert
-- data into the serial column. Then, we will successfully insert the same amount
-- of data into the referencing table. However, if the values that are generated
-- by the serial column do not exist on the referenced table, the query fails.
CREATE TABLE referenced_table(test_column SERIAL PRIMARY KEY, test_column2 int);
CREATE TABLE referencing_table(id int, ref_id SERIAL);
@ -364,7 +362,7 @@ INSERT INTO referencing_table SELECT x,(random()*1000)::int FROM generate_series
DROP TABLE referenced_table CASCADE;
DROP TABLE referencing_table CASCADE;
-- In the following tests, we create a foreign constraint with
-- ON UPDATE CASCADE and see if it works properly with cascading upsert
CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column));
CREATE TABLE referencing_table(id int, ref_id int DEFAULT -1);
@ -400,11 +398,11 @@ COMMIT;
DROP TABLE referenced_table CASCADE;
DROP TABLE referencing_table CASCADE;
-- Chained references
-- In the following test, we create foreign keys from one column in a distributed
-- table to two reference tables. We expect to see that even if the data exists in
-- one reference table, it is not going to be inserted into the referencing table
-- because the key is missing from the other table. Data can only be inserted into
-- the referencing table if it exists in both referenced tables.
-- Additionally, a delete or update in one referenced table should cascade properly.
CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column));
@ -455,10 +453,10 @@ DROP TABLE referenced_table2 CASCADE;
DROP TABLE referencing_table CASCADE;
-- In the following test, we create foreign keys from two columns in a distributed
-- table to two reference tables separately. We expect to see that even if the data
-- exists in one reference table for one column, it is not going to be inserted
-- into the referencing table because the other constraint doesn't hold. Data can only
-- be inserted into the referencing table if both columns exist in the respective
-- columns in the referenced tables.
-- Additionally, a delete or update in one referenced table should cascade properly.
CREATE TABLE referenced_table(test_column int, test_column2 int, PRIMARY KEY(test_column));
@ -517,8 +515,8 @@ DROP TABLE referencing_table CASCADE;
-- two distributed tables are referencing one reference table and
-- at the same time distributed table 2 is referencing
-- distributed table 1. Thus, we have a triangular relationship:
-- distributed table 1 has a foreign key from the distribution column to the reference table
-- distributed table 2 has a foreign key from a non-distribution column to the reference table
-- distributed table 2 has a foreign key to distributed table 1 on the distribution column
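A minimal sketch of that triangular layout, with hypothetical table names (the test's actual DDL is elided by the hunk):
CREATE TABLE reference_table(id int PRIMARY KEY);
CREATE TABLE distributed_table_1(key int PRIMARY KEY);
CREATE TABLE distributed_table_2(key int PRIMARY KEY, value int);
SELECT create_reference_table('reference_table');
SELECT create_distributed_table('distributed_table_1', 'key');
SELECT create_distributed_table('distributed_table_2', 'key');
-- distribution column -> reference table
ALTER TABLE distributed_table_1 ADD FOREIGN KEY (key) REFERENCES reference_table(id);
-- non-distribution column -> reference table
ALTER TABLE distributed_table_2 ADD FOREIGN KEY (value) REFERENCES reference_table(id);
-- distribution column -> the other distributed table
ALTER TABLE distributed_table_2 ADD FOREIGN KEY (key) REFERENCES distributed_table_1(key);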
@ -580,7 +578,7 @@ DROP TABLE referencing_table CASCADE;
DROP TABLE referencing_table2 CASCADE;
\set VERBOSITY default
-- In this test we have a chained relationship in the form of:
-- distributed table (referencing_referencing_table) has a foreign key with two columns
-- to another distributed table (referencing_table)
-- referencing_table has another foreign key with 2 columns to referenced_table.
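A hedged sketch of the chained two-column foreign keys described above (column names are hypothetical; the middle table needs a matching unique constraint for the second FK to be valid SQL):
CREATE TABLE referenced_table(test_column int, test_column2 int,
PRIMARY KEY(test_column, test_column2));
CREATE TABLE referencing_table(id int, ref_id int,
PRIMARY KEY(id, ref_id),
FOREIGN KEY(id, ref_id) REFERENCES referenced_table(test_column, test_column2));
CREATE TABLE referencing_referencing_table(id int, ref_id int,
FOREIGN KEY(id, ref_id) REFERENCES referencing_table(id, ref_id));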
@ -607,11 +605,11 @@ DROP TABLE referencing_table CASCADE;
DROP TABLE referencing_referencing_table;
-- test if create_distributed_table works in transactions with some edge cases
-- the following checks if create_distributed_table works on foreign keys when
-- one of them is a self-referencing table of multiple distributed tables
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES test_table_1(id));
SELECT create_distributed_table('test_table_2', 'id');
@ -650,7 +648,7 @@ ROLLBACK;
-- make sure that we fail if we need parallel data load
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
@ -821,7 +819,7 @@ SELECT create_reference_table('test_table_1');
SELECT create_distributed_table('test_table_2', 'id');
INSERT INTO test_table_1 VALUES (1),(2),(3);
INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3);
TRUNCATE test_table_1 CASCADE;
SELECT * FROM test_table_2;
@ -834,7 +832,7 @@ SELECT create_reference_table('test_table_1');
SELECT create_distributed_table('test_table_2', 'id');
INSERT INTO test_table_1 VALUES (1),(2),(3);
INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3);
BEGIN;
TRUNCATE test_table_1 CASCADE;
@ -865,7 +863,7 @@ SELECT create_reference_table('test_table_1');
SELECT create_distributed_table('test_table_2', 'id');
INSERT INTO test_table_1 VALUES (1),(2),(3);
INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3);
TRUNCATE test_table_2 CASCADE;
SELECT * FROM test_table_2;
@ -879,7 +877,7 @@ SELECT create_reference_table('test_table_1');
SELECT create_distributed_table('test_table_2', 'id');
INSERT INTO test_table_1 VALUES (1),(2),(3);
INSERT INTO test_table_2 VALUES (1,1),(2,2),(3,3);
BEGIN;
TRUNCATE test_table_2 CASCADE;
COMMIT;
@ -889,7 +887,7 @@ SELECT * FROM test_table_1;
DROP TABLE test_table_1, test_table_2;
-- check if we successfully set multi_shard_modify_mode to sequential after sequentially running DDLs
-- in a transaction since the upcoming DDLs need to run sequentially.
CREATE TABLE test_table_1(id int PRIMARY KEY);
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
CREATE TABLE test_table_3(id int PRIMARY KEY, value_1 int);

View File

@ -2,10 +2,6 @@
-- MULTI_CREATE_TABLE_NEW_FEATURES
--
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
-- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10
-- is forbidden in distributed tables.
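A minimal sketch of what this test checks (hypothetical table name; create_distributed_table is expected to error out on the identity column):
CREATE TABLE table_identity_col(id int GENERATED ALWAYS AS IDENTITY, payload text);
SELECT create_distributed_table('table_identity_col', 'id'); -- expected to ERROR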

View File

@ -1,10 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 11 AS server_verion_eleven_and_above
\gset
\if :server_verion_eleven_and_above
\else
\q
\endif
--
-- Regression tests for deparsing ALTER/DROP PROCEDURE Queries
--
@ -26,13 +19,13 @@ SELECT substring(:'server_version', '\d+')::int >= 11 AS server_verion_eleven_an
-- SET configuration_parameter FROM CURRENT
-- RESET configuration_parameter
-- RESET ALL
--
-- DROP PROCEDURE [ IF EXISTS ] name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] [, ...]
-- [ CASCADE | RESTRICT ]
--
-- Please note that current deparser does not return errors on some invalid queries.
--
-- For example CALLED ON NULL INPUT action is valid only for FUNCTIONS, but we still
-- allow deparsing them here.
SET citus.next_shard_id TO 20030000;
@ -43,10 +36,6 @@ SET search_path TO procedure_tests;
SET citus.shard_count TO 4;
SET client_min_messages TO INFO;
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
CREATE FUNCTION deparse_test(text)
RETURNS text
AS 'citus'
@ -56,7 +45,7 @@ CREATE FUNCTION deparse_and_run_on_workers(text)
RETURNS SETOF record
AS $fnc$
WITH deparsed_query AS ( SELECT deparse_test($1) qualified_query )
SELECT run_command_on_workers(qualified_query) FROM deparsed_query d
$fnc$
LANGUAGE SQL;

View File

@ -4,10 +4,6 @@
SET citus.next_shard_id TO 570000;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
\a\t
RESET citus.task_executor_type;
@ -390,12 +386,12 @@ SELECT true AS valid FROM explain_xml($$
SELECT true AS valid FROM explain_json($$
SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
-- Test multi shard update
EXPLAIN (COSTS FALSE)
UPDATE lineitem_hash_part
SET l_suppkey = 12;
EXPLAIN (COSTS FALSE)
UPDATE lineitem_hash_part
SET l_suppkey = 12
@ -468,8 +464,8 @@ SELECT true AS valid FROM explain_xml($$
AND o_custkey = c_custkey
AND l_suppkey = s_suppkey$$);
-- make sure that EXPLAIN works without
-- problems for queries that involve only
-- reference tables
SELECT true AS valid FROM explain_xml($$
SELECT count(*)

View File

@ -5,9 +5,6 @@
-- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
-- tables.
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS server_version_above_ten;
--
-- CREATE TEST TABLES
--

View File

@ -1,7 +1,6 @@
-- if the output of following query changes, we might need to change
-- some heap_getattr() calls to heap_deform_tuple(). This errors out in
-- postgres versions before 11.
-- some heap_getattr() calls to heap_deform_tuple().
SELECT attrelid::regclass, attname, atthasmissing, attmissingval
FROM pg_attribute
WHERE atthasmissing

View File

@ -4,10 +4,6 @@
-- Test user permissions.
--
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
SET citus.next_shard_id TO 1420000;
SET citus.shard_replication_factor TO 1;
@ -300,14 +296,14 @@ SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE pron
SELECT wait_until_metadata_sync();
-- now, make sure that the user can use the function
-- created in the transaction
BEGIN;
CREATE FUNCTION usage_access_func_second(key int, variadic v int[]) RETURNS text
LANGUAGE plpgsql AS 'begin return current_user; end;';
SELECT create_distributed_function('usage_access_func_second(int,int[])', '$1');
SELECT usage_access_func_second(1, 2,3,4,5) FROM full_access_user_schema.t1 LIMIT 1;
ROLLBACK;

View File

@ -6,9 +6,6 @@ SET citus.next_shard_id TO 1660000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS server_version_above_ten;
--
-- Distributed Partitioned Table Creation Tests
--
@ -49,34 +46,34 @@ SELECT * FROM partitioning_test ORDER BY 1;
SELECT * FROM partitioning_hash_test ORDER BY 1;
-- see partitioned table and its partitions are distributed
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010')
ORDER BY 1;
SELECT
logicalrelid, count(*)
FROM pg_dist_shard
WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010')
GROUP BY
logicalrelid
ORDER BY
1,2;
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_hash_test', 'partitioning_hash_test_0', 'partitioning_hash_test_1')
ORDER BY 1;
SELECT
logicalrelid, count(*)
FROM pg_dist_shard
WHERE logicalrelid IN ('partitioning_hash_test', 'partitioning_hash_test_0', 'partitioning_hash_test_1')
GROUP BY
logicalrelid
@ -87,17 +84,17 @@ ORDER BY
CREATE TABLE partitioning_test_2011 PARTITION OF partitioning_test FOR VALUES FROM ('2011-01-01') TO ('2012-01-01');
-- new partition is automatically distributed as well
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_test', 'partitioning_test_2011')
ORDER BY 1;
SELECT
logicalrelid, count(*)
FROM pg_dist_shard
WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011')
GROUP BY
logicalrelid
@ -114,17 +111,17 @@ INSERT INTO partitioning_test_2012 VALUES (6, '2012-07-07');
ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2012 FOR VALUES FROM ('2012-01-01') TO ('2013-01-01');
-- attached partition is distributed as well
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_test', 'partitioning_test_2012')
ORDER BY 1;
SELECT
logicalrelid, count(*)
FROM pg_dist_shard
WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012')
GROUP BY
logicalrelid
@ -391,7 +388,7 @@ SELECT
FROM
information_schema.table_constraints
WHERE
table_name = 'partitioning_test_2009' AND
constraint_name = 'partitioning_2009_primary';
-- however, you can add primary key if it contains both distribution and partition key
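A hedged sketch of the allowed variant, assuming partitioning_test(id int, time date) is distributed on id and range-partitioned on time as earlier in this file (constraint name hypothetical):
ALTER TABLE partitioning_test
ADD CONSTRAINT partitioning_test_pkey PRIMARY KEY (id, time);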
@ -473,9 +470,9 @@ SELECT right(table_name, 7)::int as shardid, * FROM (
json_array_elements_text(result::json)::json )).*
FROM run_command_on_workers($$
SELECT
COALESCE(json_agg(row_to_json(q)), '[]'::json)
FROM (
SELECT
table_name, constraint_name, constraint_type
FROM information_schema.table_constraints
WHERE
@ -639,40 +636,40 @@ SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType
FROM
(SELECT *, random()
FROM
(SELECT
"t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types"
FROM
(SELECT
"t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events
FROM(
(SELECT
"events"."user_id", "events"."time", 0 AS event
FROM
partitioned_events_table as "events"
WHERE
event_type IN (1, 2) )
UNION
(SELECT
"events"."user_id", "events"."time", 1 AS event
FROM
partitioned_events_table as "events"
WHERE
event_type IN (3, 4) )
UNION
(SELECT
"events"."user_id", "events"."time", 2 AS event
FROM
partitioned_events_table as "events"
WHERE
event_type IN (5, 6) )
UNION
(SELECT
"events"."user_id", "events"."time", 3 AS event
FROM
partitioned_events_table as "events"
WHERE
event_type IN (1, 6))) t1
GROUP BY "t1"."user_id") AS t) "q"
) AS final_query
GROUP BY types
ORDER BY types;
@ -680,73 +677,73 @@ ORDER BY types;
-- UNION and JOIN on both partitioned and regular tables
SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType
FROM
(SELECT
*, random()
FROM
(SELECT
"t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types"
FROM
(SELECT
"t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events
FROM (
(SELECT
*
FROM
(SELECT
"events"."time", 0 AS event, "events"."user_id"
FROM
partitioned_events_table as "events"
WHERE
event_type IN (1, 2)) events_subquery_1)
UNION
(SELECT *
FROM
(
SELECT * FROM
(
SELECT
max("events"."time"),
0 AS event,
"events"."user_id"
FROM
events_table as "events", users_table as "users"
WHERE
events.user_id = users.user_id AND
event_type IN (1, 2)
GROUP BY "events"."user_id"
) as events_subquery_5
) events_subquery_2)
UNION
(SELECT *
FROM
(SELECT
"events"."time", 2 AS event, "events"."user_id"
FROM
partitioned_events_table as "events"
WHERE
event_type IN (3, 4)) events_subquery_3)
UNION
(SELECT *
FROM
(SELECT
"events"."time", 3 AS event, "events"."user_id"
FROM
events_table as "events"
WHERE
event_type IN (5, 6)) events_subquery_4)
) t1
GROUP BY "t1"."user_id") AS t) "q"
INNER JOIN
(SELECT
"users"."user_id"
FROM
partitioned_users_table as "users"
WHERE
value_1 > 2 and value_1 < 5) AS t
ON (t.user_id = q.user_id)) as final_query
GROUP BY
types
ORDER BY
types;
-- test LIST partitioning
@ -778,37 +775,37 @@ WHERE
SELECT
count(*) AS cnt, "generated_group_field"
FROM
(SELECT
"eventQuery"."user_id", random(), generated_group_field
FROM
(SELECT
"multi_group_wrapper_1".*, generated_group_field, random()
FROM
(SELECT *
FROM
(SELECT
"list_partitioned_events_table"."time", "list_partitioned_events_table"."user_id" as event_user_id
FROM
list_partitioned_events_table as "list_partitioned_events_table"
WHERE
user_id > 2) "temp_data_queries"
INNER JOIN
(SELECT
"users"."user_id"
FROM
partitioned_users_table as "users"
WHERE
user_id > 2 and value_2 = 1) "user_filters_1"
ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1"
LEFT JOIN
(SELECT
"users"."user_id" AS "user_id", value_2 AS "generated_group_field"
FROM
partitioned_users_table as "users") "left_group_by_1"
ON ("left_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery"
GROUP BY
"generated_group_field"
ORDER BY
cnt DESC, generated_group_field ASC
LIMIT 10;
@ -1051,18 +1048,18 @@ CREATE TABLE partitioning_schema."schema-test_2009"(id int, time date);
ALTER TABLE partitioning_schema."schema-test" ATTACH PARTITION partitioning_schema."schema-test_2009" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
-- attached partition is distributed as well
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass)
ORDER BY 1;
SELECT
logicalrelid, count(*)
FROM
pg_dist_shard
WHERE
logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass)
GROUP BY
@ -1078,18 +1075,18 @@ SELECT create_distributed_table('partitioning_schema."schema-test"', 'id');
CREATE TABLE partitioning_schema."schema-test_2009" PARTITION OF partitioning_schema."schema-test" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
-- newly created partition is distributed as well
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass)
ORDER BY 1;
SELECT
logicalrelid, count(*)
FROM
pg_dist_shard
WHERE
logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass)
GROUP BY
@ -1106,18 +1103,18 @@ SELECT create_distributed_table('"schema-test"', 'id');
CREATE TABLE partitioning_schema."schema-test_2009" PARTITION OF "schema-test" FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
-- newly created partition is distributed as well
SELECT
logicalrelid
FROM
pg_dist_partition
WHERE
logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass)
ORDER BY 1;
SELECT
logicalrelid, count(*)
FROM
pg_dist_shard
WHERE
logicalrelid IN ('partitioning_schema."schema-test"'::regclass, 'partitioning_schema."schema-test_2009"'::regclass)
GROUP BY

View File

@ -1,7 +1,3 @@
-- This test has different output per major version
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
-- ===================================================================
-- create test functions
-- ===================================================================
@ -120,8 +116,8 @@ CREATE TABLE date_partitioned_table_100 (id int, time date) PARTITION BY RANGE (
CREATE TABLE date_partition_2007_100 (id int, time date );
-- now create the partitioning hierarchy
SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
referenced_shard:=100, referenced_schema_name:='public',
command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' );
-- the hierarchy is successfully created
@ -131,14 +127,14 @@ SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_
SELECT master_get_table_ddl_events('date_partition_2007_100');
-- now break the partitioning hierarchy
SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public',
referenced_shard:=100, referenced_schema_name:='public',
command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' );
-- the hierarchy is successfully broken
\d+ date_partitioned_table_100
-- now let's have some more complex partitioning hierarchies with
-- tables on different schemas and constraints on the tables
CREATE SCHEMA partition_parent_schema;
@ -256,14 +252,14 @@ CREATE TABLE capitals (
-- returns true since capitals inherits from cities
SELECT table_inherits('capitals');
-- although date_partition_2006 inherits from its parent
-- returns false since the hierarchy is formed via partitioning
SELECT table_inherits('date_partition_2006');
-- returns true since cities is inherited by capitals
SELECT table_inherited('cities');
-- although date_partitioned_table is inherited by its partitions
-- returns false since the hierarchy is formed via partitioning
SELECT table_inherited('date_partitioned_table');

View File

@ -10,10 +10,6 @@
SET citus.next_shard_id TO 690000;
SET citus.enable_unique_job_ids TO off;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
BEGIN;
SET client_min_messages TO DEBUG4;
SET citus.task_executor_type TO 'task-tracker';

View File

@ -9,11 +9,6 @@
SET citus.next_shard_id TO 710000;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
BEGIN;
SET client_min_messages TO DEBUG3;
SET citus.task_executor_type TO 'task-tracker';

View File

@ -5,13 +5,9 @@
SET citus.next_shard_id TO 880000;
-- print whether we're using version > 9 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine;
-- the function simply parses the results and returns 'shardId@worker'
-- for all the explain task outputs
CREATE OR REPLACE FUNCTION parse_explain_output(in qry text, in table_name text, out r text)
RETURNS SETOF TEXT AS $$
DECLARE
portOfTheTask text;
@ -128,10 +124,10 @@ RESET client_min_messages;
-- Now, let's test round-robin policy
-- round-robin policy relies on PostgreSQL's local transactionId,
-- which might change and we don't have any control over it.
-- the important thing that we look for is that round-robin policy
-- should give the same output for executions in the same transaction
-- and different output for executions that are not inside the
-- same transaction. To ensure that, we define a helper function
BEGIN;
@ -141,9 +137,9 @@ SET LOCAL citus.explain_distributed_queries TO on;
CREATE TEMPORARY TABLE explain_outputs (value text);
SET LOCAL citus.task_assignment_policy TO 'round-robin';
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table');
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table');
-- given that we're in the same transaction, the count should be 1
@ -157,9 +153,9 @@ COMMIT;
SET citus.task_assignment_policy TO 'round-robin';
SET citus.explain_distributed_queries TO ON;
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table');
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_reference_table;', 'task_assignment_reference_table');
-- given that we're in the same transaction, the count should be 2
@ -169,7 +165,7 @@ TRUNCATE explain_outputs;
-- same test with a distributed table
-- we keep this test because as of this commit, the code
-- paths for reference tables and distributed tables are
-- not the same
SET citus.shard_replication_factor TO 2;
@ -181,9 +177,9 @@ BEGIN;
SET LOCAL citus.explain_distributed_queries TO on;
SET LOCAL citus.task_assignment_policy TO 'round-robin';
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash');
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash');
-- given that we're in the same transaction, the count should be 1
@ -197,9 +193,9 @@ COMMIT;
SET citus.task_assignment_policy TO 'round-robin';
SET citus.explain_distributed_queries TO ON;
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash');
INSERT INTO explain_outputs
SELECT parse_explain_output('EXPLAIN SELECT count(*) FROM task_assignment_replicated_hash;', 'task_assignment_replicated_hash');
-- given that we're in the same transaction, the count should be 2

View File

@ -1,8 +1,5 @@
SET citus.next_shard_id TO 990000;
-- print server version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 as version_above_ten;
-- ===================================================================
-- test utility statement functionality

View File

@ -7,10 +7,6 @@
-- router queries, single row inserts, multi row inserts via insert
-- into select, multi row insert via copy commands.
-- print whether we're using version > 10 to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
SELECT count(*) FROM lineitem_hash_part;
SELECT count(*) FROM orders_hash_part;
@ -154,10 +150,10 @@ SELECT * FROM lineitems_by_shipping_method ORDER BY 1,2 LIMIT 5;
-- create a view with group by on partition column
CREATE VIEW lineitems_by_orderkey AS
SELECT
l_orderkey, count(*)
FROM
lineitem_hash_part
GROUP BY 1;
-- this should work since we're able to push down this query
@ -181,7 +177,7 @@ DROP VIEW priority_orders;
CREATE VIEW recent_users AS
SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
HAVING max(time) > '2017-11-23 16:20:33.264457'::timestamp order by 2 DESC;
SELECT * FROM recent_users ORDER BY 2 DESC, 1 DESC;
-- create a view for recent_events
@ -194,16 +190,16 @@ SELECT count(*) FROM recent_events;
-- count number of events of recent_users
SELECT count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id);
-- count number of events per recent user, ordered by count
SELECT ru.user_id, count(*)
FROM recent_users ru
JOIN events_table et
ON (ru.user_id = et.user_id)
GROUP BY ru.user_id
ORDER BY 2 DESC, 1;
-- the same query with a left join, however, would still generate the same result
SELECT ru.user_id, count(*)
FROM recent_users ru
LEFT JOIN events_table et
ON (ru.user_id = et.user_id)
GROUP BY ru.user_id
@ -211,8 +207,8 @@ SELECT ru.user_id, count(*)
-- query wrapped inside a subquery, it needs another top level order by
SELECT * FROM
(SELECT ru.user_id, count(*)
FROM recent_users ru
JOIN events_table et
ON (ru.user_id = et.user_id)
GROUP BY ru.user_id
@ -222,8 +218,8 @@ ORDER BY 2 DESC, 1;
-- non-partition key joins are supported inside subquery
-- via pull-push execution
SELECT * FROM
(SELECT ru.user_id, count(*)
FROM recent_users ru
JOIN events_table et
ON (ru.user_id = et.event_type)
GROUP BY ru.user_id
@ -238,7 +234,7 @@ SELECT ru.user_id FROM recent_users ru JOIN recent_events re USING(user_id) GROU
-- recent_events that are not done by recent users
SELECT count(*) FROM (
SELECT re.*, ru.user_id AS recent_user
FROM recent_events re LEFT JOIN recent_users ru USING(user_id)) reu
WHERE recent_user IS NULL;
-- same query with anti-join
@ -289,7 +285,7 @@ SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user
SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users WHERE user_id = 1);
-- union between views is supported through recursive planning
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users)
ORDER BY 1;
@ -297,7 +293,7 @@ ORDER BY 1;
-- wrapping it inside a SELECT * works
SELECT *
FROM (
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0
@ -306,7 +302,7 @@ SELECT *
-- union all also works for views
SELECT *
FROM (
(SELECT user_id FROM recent_users)
UNION ALL
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0
@ -314,7 +310,7 @@ SELECT *
SELECT count(*)
FROM (
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0;
@ -322,7 +318,7 @@ SELECT count(*)
-- UNION ALL between views is supported through recursive planning
SELECT count(*)
FROM (
(SELECT user_id FROM recent_users)
UNION ALL
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 2 AND user_id > 0;
@ -333,7 +329,7 @@ SELECT count(*)
(SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
HAVING max(time) > '2017-11-22 05:45:49.978738'::timestamp order by 2 DESC) aa
)
UNION
(SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u
WHERE user_id < 2 AND user_id > 0;
@ -343,7 +339,7 @@ SELECT count(*)
(SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table
GROUP BY user_id
HAVING max(time) > '2017-11-22 05:45:49.978738'::timestamp order by 2 DESC) aa
)
UNION ALL
(SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 1 and value_1 < 3) bb) ) u
WHERE user_id < 2 AND user_id > 0;
@ -411,7 +407,7 @@ EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER
EXPLAIN (COSTS FALSE) SELECT *
FROM (
(SELECT user_id FROM recent_users)
UNION
(SELECT user_id FROM selected_users) ) u
WHERE user_id < 4 AND user_id > 1

View File

@ -2,8 +2,6 @@
---
--- tests around access tracking within transaction blocks
---
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 10 AS version_ten_or_above;
CREATE SCHEMA access_tracking;
SET search_path TO 'access_tracking';
@ -49,7 +47,7 @@ BEGIN
RETURN 'not_accessed';
ELSIF relationShardAccess = 1 THEN
RETURN 'reference_table_access';
ELSE
RETURN 'parallel_access';
END IF;
END;
@ -57,12 +55,12 @@ $$ LANGUAGE 'plpgsql' IMMUTABLE;
CREATE VIEW relation_acesses AS
SELECT table_name,
relation_access_mode_to_text(table_name, relation_select_access_mode(table_name::regclass)) as select_access,
relation_access_mode_to_text(table_name, relation_dml_access_mode(table_name::regclass)) as dml_access,
relation_access_mode_to_text(table_name, relation_ddl_access_mode(table_name::regclass)) as ddl_access
FROM
((SELECT 'table_' || i as table_name FROM generate_series(1, 7) i) UNION (SELECT 'partitioning_test') UNION (SELECT 'partitioning_test_2009') UNION (SELECT 'partitioning_test_2010')) tables;
SET citus.shard_replication_factor TO 1;
@ -102,7 +100,7 @@ COMMIT;
SELECT count(*) FROM table_1;
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
-- a very simple test that first checks sequential
-- and parallel SELECTs, DMLs, and DDLs
BEGIN;
SELECT * FROM relation_acesses WHERE table_name = 'table_1';
@ -143,12 +141,12 @@ ROLLBACK;
-- a simple join touches a single shard per table
BEGIN;
SELECT
count(*)
FROM
table_1, table_2, table_3, table_4, table_5
WHERE
table_1.key = table_2.key AND table_2.key = table_3.key AND
table_3.key = table_4.key AND table_4.key = table_5.key AND
table_1.key = 1;
@ -157,9 +155,9 @@ ROLLBACK;
-- a simple real-time join touches all shards per table
BEGIN;
SELECT
count(*)
FROM
table_1, table_2
WHERE
table_1.key = table_2.key;
@ -172,9 +170,9 @@ ROLLBACK;
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT
count(*)
FROM
table_1, table_2
WHERE
table_1.key = table_2.key;
@ -184,17 +182,17 @@ ROLLBACK;
-- a simple subquery pushdown that touches all shards
BEGIN;
SELECT
count(*)
FROM
(
SELECT
random()
FROM
table_1, table_2, table_3, table_4, table_5
WHERE
table_1.key = table_2.key AND table_2.key = table_3.key AND
table_3.key = table_4.key AND table_4.key = table_5.key
) as foo;
@ -202,7 +200,7 @@ BEGIN;
ROLLBACK;
-- simple multi shard update both sequential and parallel modes
-- note that in multi shard modify mode we always add select
-- access for all the shards accessed. But, sequential mode is OK
BEGIN;
UPDATE table_1 SET value = 15;
@ -214,8 +212,8 @@ ROLLBACK;
-- now UPDATE/DELETE with subselect pushdown
BEGIN;
UPDATE
table_1 SET value = 15
WHERE key IN (SELECT key FROM table_2 JOIN table_3 USING (key) WHERE table_2.value = 15);
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1;
ROLLBACK;
@ -239,22 +237,22 @@ BEGIN;
INSERT INTO table_2 SELECT * FROM table_1 OFFSET 0;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
ROLLBACK;
-- recursively planned SELECT
BEGIN;
SELECT
count(*)
FROM
(
SELECT
random()
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
OFFSET 0
) as foo;
@ -264,35 +262,35 @@ ROLLBACK;
-- recursively planned SELECT and coordinator INSERT .. SELECT
BEGIN;
INSERT INTO table_3 (key)
SELECT
*
FROM
(
SELECT
random() * 1000
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
OFFSET 0
) as foo;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2', 'table_3') ORDER BY 1;
ROLLBACK;
-- recursively planned SELECT and coordinator INSERT .. SELECT
-- but modifies single shard, marked as sequential operation
BEGIN;
INSERT INTO table_3 (key)
SELECT
*
FROM
(
SELECT
random() * 1000
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
@ -307,16 +305,16 @@ ROLLBACK;
BEGIN;
DELETE FROM table_3 where key IN
(
SELECT
*
FROM
(
SELECT
table_1.key
FROM
table_1, table_2
WHERE
table_1.key = table_2.key
OFFSET 0
) as foo
) AND value IN (SELECT key FROM table_4);
@ -358,7 +356,7 @@ BEGIN;
UPDATE table_6 SET value = 15;
SELECT * FROM relation_acesses WHERE table_name IN ('table_6');
ALTER TABLE table_6 ADD COLUMN x INT;
SELECT * FROM relation_acesses WHERE table_name IN ('table_6');
ROLLBACK;
@ -515,15 +513,15 @@ BEGIN;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1', 'table_2') ORDER BY 1;
ROLLBACK;
-- CTEs with SELECT only should work fine
BEGIN;
WITH cte AS (SELECT count(*) FROM table_1)
SELECT * FROM cte;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
COMMIT;
-- CTEs with SELECT only in sequential mode should work fine
BEGIN;
SET LOCAL citus.multi_shard_modify_mode = 'sequential';
@ -534,7 +532,7 @@ COMMIT;
-- modifying CTEs should work fine with multi-row inserts, which run sequentially by default
BEGIN;
WITH cte_1 AS (INSERT INTO table_1 VALUES (1000,1000), (1001, 1001), (1002, 1002) RETURNING *)
SELECT * FROM cte_1 ORDER BY 1;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
@ -542,7 +540,7 @@ ROLLBACK;
-- modifying CTEs should work fine with parallel mode
BEGIN;
WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *)
SELECT count(*) FROM cte_1 ORDER BY 1;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
@ -550,19 +548,19 @@ ROLLBACK;
-- modifying CTEs should work fine with sequential mode
BEGIN;
WITH cte_1 AS (UPDATE table_1 SET value = 15 RETURNING *)
SELECT count(*) FROM cte_1 ORDER BY 1;
SELECT * FROM relation_acesses WHERE table_name IN ('table_1') ORDER BY 1;
ROLLBACK;
-- create distributed table with data loading
-- should mark both parallel dml and parallel ddl
DROP TABLE table_3;
CREATE TABLE table_3 (key int, value int);
INSERT INTO table_3 SELECT i, i FROM generate_series(0,100) i;
BEGIN;
SELECT create_distributed_table('table_3', 'key');
SELECT * FROM relation_acesses WHERE table_name IN ('table_3') ORDER BY 1;
COMMIT;

Some files were not shown because too many files have changed in this diff.