Merge pull request #2844 from citusdata/postgres12

Postgres 12
Philip Dubé 2019-08-22 19:36:24 +00:00 committed by GitHub
commit d3be6cd0a6
115 changed files with 10389 additions and 1104 deletions

.gitattributes

@@ -28,4 +28,5 @@ configure -whitespace
 src/backend/distributed/utils/citus_outfuncs.c -citus-style
 src/backend/distributed/utils/ruleutils_10.c -citus-style
 src/backend/distributed/utils/ruleutils_11.c -citus-style
+src/backend/distributed/utils/ruleutils_12.c -citus-style
 src/include/distributed/citus_nodes.h -citus-style

configure

@@ -2531,7 +2531,7 @@ if test -z "$version_num"; then
   as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5
 fi
-if test "$version_num" != '10' -a "$version_num" != '11'; then
+if test "$version_num" != '10' -a "$version_num" != '11' -a "$version_num" != '12'; then
   as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
 else
   { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5
@@ -3832,6 +3832,41 @@ if test x"$citusac_cv_prog_cc_cflags__Wno_clobbered" = x"yes"; then
   CITUS_CFLAGS="$CITUS_CFLAGS -Wno-clobbered"
 fi
 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wno-gnu-variable-sized-type-not-at-end" >&5
+$as_echo_n "checking whether $CC supports -Wno-gnu-variable-sized-type-not-at-end... " >&6; }
+if ${citusac_cv_prog_cc_cflags__Wno_gnu_variable_sized_type_not_at_end+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  citusac_save_CFLAGS=$CFLAGS
+  CFLAGS="$citusac_save_CFLAGS -Wno-gnu-variable-sized-type-not-at-end"
+  ac_save_c_werror_flag=$ac_c_werror_flag
+  ac_c_werror_flag=yes
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  citusac_cv_prog_cc_cflags__Wno_gnu_variable_sized_type_not_at_end=yes
+else
+  citusac_cv_prog_cc_cflags__Wno_gnu_variable_sized_type_not_at_end=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_c_werror_flag=$ac_save_c_werror_flag
+CFLAGS="$citusac_save_CFLAGS"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wno_gnu_variable_sized_type_not_at_end" >&5
+$as_echo "$citusac_cv_prog_cc_cflags__Wno_gnu_variable_sized_type_not_at_end" >&6; }
+if test x"$citusac_cv_prog_cc_cflags__Wno_gnu_variable_sized_type_not_at_end" = x"yes"; then
+  CITUS_CFLAGS="$CITUS_CFLAGS -Wno-gnu-variable-sized-type-not-at-end"
+fi
+
 # And add a few extra warnings
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wdeclaration-after-statement" >&5
 $as_echo_n "checking whether $CC supports -Wdeclaration-after-statement... " >&6; }


@@ -74,7 +74,7 @@ if test -z "$version_num"; then
   AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.])
 fi
-if test "$version_num" != '10' -a "$version_num" != '11'; then
+if test "$version_num" != '10' -a "$version_num" != '11' -a "$version_num" != '12'; then
   AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
 else
   AC_MSG_NOTICE([building against PostgreSQL $version_num])
@@ -157,6 +157,7 @@ CITUSAC_PROG_CC_CFLAGS_OPT([-Wno-unused-parameter])
 CITUSAC_PROG_CC_CFLAGS_OPT([-Wno-sign-compare])
 CITUSAC_PROG_CC_CFLAGS_OPT([-Wno-missing-field-initializers])
 CITUSAC_PROG_CC_CFLAGS_OPT([-Wno-clobbered])
+CITUSAC_PROG_CC_CFLAGS_OPT([-Wno-gnu-variable-sized-type-not-at-end])
 # And add a few extra warnings
 CITUSAC_PROG_CC_CFLAGS_OPT([-Wdeclaration-after-statement])
 CITUSAC_PROG_CC_CFLAGS_OPT([-Wendif-labels])


@@ -21,12 +21,16 @@
 #include "catalog/dependency.h"
 #include "catalog/index.h"
 #include "catalog/pg_am.h"
+#include "catalog/pg_attribute.h"
 #if (PG_VERSION_NUM < 110000)
 #include "catalog/pg_constraint_fn.h"
 #endif
 #include "catalog/pg_enum.h"
 #include "catalog/pg_extension.h"
 #include "catalog/pg_opclass.h"
+#if PG_VERSION_NUM >= 120000
+#include "catalog/pg_proc.h"
+#endif
 #include "catalog/pg_trigger.h"
 #include "commands/defrem.h"
 #include "commands/extension.h"
@@ -99,6 +103,8 @@ static bool LocalTableEmpty(Oid tableId);
 static void CopyLocalDataIntoShards(Oid relationId);
 static List * TupleDescColumnNameList(TupleDesc tupleDescriptor);
 static bool RelationUsesIdentityColumns(TupleDesc relationDesc);
+static bool RelationUsesGeneratedStoredColumns(TupleDesc relationDesc);
+static bool RelationUsesHeapAccessMethodOrNone(Relation relation);
 static bool CanUseExclusiveConnections(Oid relationId, bool localTableEmpty);
 
 /* exports for SQL callable functions */
@@ -645,6 +651,14 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
     relationDesc = RelationGetDescr(relation);
     relationName = RelationGetRelationName(relation);
 
+    if (!RelationUsesHeapAccessMethodOrNone(relation))
+    {
+        ereport(ERROR, (errmsg(
+                            "cannot distribute relations using non-heap access methods")));
+    }
+
+#if PG_VERSION_NUM < 120000
+
     /* verify target relation does not use WITH (OIDS) PostgreSQL feature */
     if (relationDesc->tdhasoid)
     {
@@ -653,6 +667,7 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
                   errdetail("Distributed relations must not specify the WITH "
                             "(OIDS) option in their definitions.")));
     }
+#endif
     /* verify target relation does not use identity columns */
     if (RelationUsesIdentityColumns(relationDesc))
@@ -663,6 +678,15 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
                   "... AS IDENTITY.")));
     }
 
+    /* verify target relation does not use generated columns */
+    if (RelationUsesGeneratedStoredColumns(relationDesc))
+    {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                        errmsg("cannot distribute relation: %s", relationName),
+                        errdetail("Distributed relations must not use GENERATED ALWAYS "
+                                  "AS (...) STORED.")));
+    }
+
     /* check for support function needed by specified partition method */
     if (distributionMethod == DISTRIBUTE_BY_HASH)
     {
@@ -1196,7 +1220,11 @@ CopyLocalDataIntoShards(Oid distributedRelationId)
     bool stopOnFailure = true;
 
     EState *estate = NULL;
+#if PG_VERSION_NUM >= 120000
+    TableScanDesc scan = NULL;
+#else
     HeapScanDesc scan = NULL;
+#endif
     HeapTuple tuple = NULL;
     ExprContext *econtext = NULL;
     MemoryContext oldContext = NULL;
@@ -1230,7 +1258,7 @@ CopyLocalDataIntoShards(Oid distributedRelationId)
     /* get the table columns */
     tupleDescriptor = RelationGetDescr(distributedRelation);
-    slot = MakeSingleTupleTableSlot(tupleDescriptor);
+    slot = MakeSingleTupleTableSlotCompat(tupleDescriptor, &TTSOpsHeapTuple);
     columnNameList = TupleDescColumnNameList(tupleDescriptor);
 
     /* determine the partition column in the tuple descriptor */
@@ -1256,14 +1284,22 @@ CopyLocalDataIntoShards(Oid distributedRelationId)
     copyDest->rStartup(copyDest, 0, tupleDescriptor);
 
     /* begin reading from local table */
+#if PG_VERSION_NUM >= 120000
+    scan = table_beginscan(distributedRelation, GetActiveSnapshot(), 0, NULL);
+#else
     scan = heap_beginscan(distributedRelation, GetActiveSnapshot(), 0, NULL);
+#endif
 
     oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
 
     while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
     {
         /* materialize tuple and send it to a shard */
+#if PG_VERSION_NUM >= 120000
+        ExecStoreHeapTuple(tuple, slot, false);
+#else
         ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+#endif
         copyDest->receiveSlot(slot, copyDest);
 
         /* clear tuple memory */
@@ -1293,7 +1329,11 @@ CopyLocalDataIntoShards(Oid distributedRelationId)
     MemoryContextSwitchTo(oldContext);
 
     /* finish reading from the local table */
+#if PG_VERSION_NUM >= 120000
+    table_endscan(scan);
+#else
     heap_endscan(scan);
+#endif
 
     /* finish writing into the shards */
     copyDest->rShutdown(copyDest);
@@ -1355,3 +1395,44 @@ RelationUsesIdentityColumns(TupleDesc relationDesc)
 
     return false;
 }
+
+
+/*
+ * RelationUsesGeneratedStoredColumns returns whether a given relation uses the
+ * GENERATED ALWAYS AS (...) STORED feature introduced in PostgreSQL 12.
+ */
+static bool
+RelationUsesGeneratedStoredColumns(TupleDesc relationDesc)
+{
+#if PG_VERSION_NUM >= 120000
+    int attributeIndex = 0;
+
+    for (attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++)
+    {
+        Form_pg_attribute attributeForm = TupleDescAttr(relationDesc, attributeIndex);
+
+        if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
+        {
+            return true;
+        }
+    }
+#endif
+
+    return false;
+}
+
+
+/*
+ * RelationUsesHeapAccessMethodOrNone returns whether the given relation uses
+ * the default (heap) table access method, or no access method at all.
+ */
+static bool
+RelationUsesHeapAccessMethodOrNone(Relation relation)
+{
+#if PG_VERSION_NUM >= 120000
+    return relation->rd_rel->relkind != RELKIND_RELATION ||
+           relation->rd_amhandler == HEAP_TABLE_AM_HANDLER_OID;
+#else
+    return true;
+#endif
+}
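The `MakeSingleTupleTableSlotCompat` call above (and in later files) is not defined in this diff; it presumably lives in `version_compat.h`. PG12 added a `TupleTableSlotOps` argument to `MakeSingleTupleTableSlot`, so a minimal sketch of such a wrapper, assuming a macro-based approach, could be:

```c
/* hypothetical sketch: the tts_ops argument only exists on PG12+ */
#if PG_VERSION_NUM >= 120000
#define MakeSingleTupleTableSlotCompat(tupleDesc, tts_ops) \
	MakeSingleTupleTableSlot(tupleDesc, tts_ops)
#else
#define MakeSingleTupleTableSlotCompat(tupleDesc, tts_ops) \
	MakeSingleTupleTableSlot(tupleDesc)
#endif
```

Call sites can then pass `&TTSOpsHeapTuple`, `&TTSOpsVirtual`, or `&TTSOpsMinimalTuple` unconditionally, as this diff does.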


@@ -15,6 +15,9 @@
 #include "access/htup_details.h"
 #include "catalog/namespace.h"
 #include "catalog/pg_constraint.h"
+#if (PG_VERSION_NUM >= 120000)
+#include "access/genam.h"
+#endif
 #if (PG_VERSION_NUM < 110000)
 #include "catalog/pg_constraint_fn.h"
 #endif


@@ -10,6 +10,9 @@
 
 #include "postgres.h"
 
+#if PG_VERSION_NUM >= 120000
+#include "access/genam.h"
+#endif
 #include "access/htup_details.h"
 #include "access/xact.h"
 #include "catalog/catalog.h"


@@ -463,7 +463,7 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
     columnNulls = palloc0(columnCount * sizeof(bool));
 
     /* set up a virtual tuple table slot */
-    tupleTableSlot = MakeSingleTupleTableSlot(tupleDescriptor);
+    tupleTableSlot = MakeSingleTupleTableSlotCompat(tupleDescriptor, &TTSOpsVirtual);
     tupleTableSlot->tts_nvalid = columnCount;
     tupleTableSlot->tts_values = columnValues;
     tupleTableSlot->tts_isnull = columnNulls;
@@ -561,8 +561,8 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
         oldContext = MemoryContextSwitchTo(executorTupleContext);
 
         /* parse a row from the input */
-        nextRowFound = NextCopyFrom(copyState, executorExpressionContext,
-                                    columnValues, columnNulls, NULL);
+        nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
+                                          columnValues, columnNulls);
         if (!nextRowFound)
         {
@@ -681,8 +681,8 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)
         oldContext = MemoryContextSwitchTo(executorTupleContext);
 
         /* parse a row from the input */
-        nextRowFound = NextCopyFrom(copyState, executorExpressionContext,
-                                    columnValues, columnNulls, NULL);
+        nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
+                                          columnValues, columnNulls);
         if (!nextRowFound)
         {
@@ -2803,6 +2803,14 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
     {
         if (copyStatement->is_from)
         {
+#if PG_VERSION_NUM >= 120000
+            if (copyStatement->whereClause)
+            {
+                ereport(ERROR, (errmsg(
+                                    "Citus does not support COPY FROM with WHERE")));
+            }
+#endif
+
             /* check permissions, we're bypassing postgres' normal checks */
             if (!isCopyFromWorker)
             {
@@ -2812,7 +2820,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
             CitusCopyFrom(copyStatement, completionTag);
             return NULL;
         }
-        else if (!copyStatement->is_from)
+        else
         {
             /*
              * The copy code only handles SELECTs in COPY ... TO on master tables,
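PG12 removed WITH OIDS, and with it the trailing `Oid *tupleOid` parameter of `NextCopyFrom`. The `NextCopyFromCompat` wrapper used above is not shown in this diff; a plausible sketch, assuming it is a `version_compat.h` macro, is:

```c
/* hypothetical sketch: pre-12 NextCopyFrom takes an extra Oid * argument */
#if PG_VERSION_NUM >= 120000
#define NextCopyFromCompat(cstate, econtext, values, nulls) \
	NextCopyFrom(cstate, econtext, values, nulls)
#else
#define NextCopyFromCompat(cstate, econtext, values, nulls) \
	NextCopyFrom(cstate, econtext, values, nulls, NULL)
#endif
```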


@@ -68,7 +68,6 @@ ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt)
         return;
     }
 
-#if (PG_VERSION_NUM >= 100000)
     sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_AUTO, &ownedByTableId,
                                     &ownedByColumnId);
     if (!sequenceOwned)
@@ -76,9 +75,6 @@ ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt)
         sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_INTERNAL, &ownedByTableId,
                                         &ownedByColumnId);
     }
-#else
-    sequenceOwned = sequenceIsOwned(sequenceId, &ownedByTableId, &ownedByColumnId);
-#endif
 
     /* see whether the sequence is already owned by a distributed table */
     if (sequenceOwned)


@@ -10,8 +10,6 @@
 
 #include "postgres.h"
 
-#if (PG_VERSION_NUM >= 100000)
-
 #include "distributed/commands.h"
 #include "nodes/parsenodes.h"
@@ -22,6 +20,3 @@ ProcessCreateSubscriptionStmt(CreateSubscriptionStmt *createSubStmt)
 {
     return (Node *) createSubStmt;
 }
-
-#endif /* PG_VERSION_NUM >= 100000 */


@@ -10,6 +10,9 @@
 
 #include "postgres.h"
 
+#if PG_VERSION_NUM >= 120000
+#include "access/genam.h"
+#endif
 #include "access/htup_details.h"
 #include "access/xact.h"
 #include "catalog/index.h"


@@ -43,11 +43,10 @@ RedirectCopyDataToRegularFile(const char *filename)
 {
     StringInfo copyData = makeStringInfo();
     bool copyDone = false;
-    File fileDesc = -1;
     const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
     const int fileMode = (S_IRUSR | S_IWUSR);
-
-    fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
+    File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
+    FileCompat fileCompat = FileCompatFromFileStart(fileDesc);
 
     SendCopyInStart();
@@ -57,8 +56,8 @@ RedirectCopyDataToRegularFile(const char *filename)
         /* if received data has contents, append to regular file */
         if (copyData->len > 0)
         {
-            int appended = FileWrite(fileDesc, copyData->data, copyData->len,
-                                     PG_WAIT_IO);
+            int appended = FileWriteCompat(&fileCompat, copyData->data,
+                                           copyData->len, PG_WAIT_IO);
 
             if (appended != copyData->len)
             {
@@ -84,7 +83,6 @@ RedirectCopyDataToRegularFile(const char *filename)
 void
 SendRegularFile(const char *filename)
 {
-    File fileDesc = -1;
     StringInfo fileBuffer = NULL;
     int readBytes = -1;
     const uint32 fileBufferSize = 32768; /* 32 KB */
@@ -92,7 +90,8 @@ SendRegularFile(const char *filename)
     const int fileMode = 0;
 
     /* we currently do not check if the caller has permissions for this file */
-    fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
+    File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
+    FileCompat fileCompat = FileCompatFromFileStart(fileDesc);
 
     /*
      * We read file's contents into buffers of 32 KB. This buffer size is twice
@@ -103,7 +102,8 @@ SendRegularFile(const char *filename)
 
     SendCopyOutStart();
 
-    readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize, PG_WAIT_IO);
+    readBytes = FileReadCompat(&fileCompat, fileBuffer->data, fileBufferSize,
+                               PG_WAIT_IO);
 
     while (readBytes > 0)
     {
         fileBuffer->len = readBytes;
@@ -111,8 +111,8 @@ SendRegularFile(const char *filename)
 
         SendCopyData(fileBuffer);
         resetStringInfo(fileBuffer);
 
-        readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize,
-                             PG_WAIT_IO);
+        readBytes = FileReadCompat(&fileCompat, fileBuffer->data, fileBufferSize,
+                                   PG_WAIT_IO);
     }
 
     SendCopyDone();
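PG12 switched `FileRead`/`FileWrite` to positional I/O with an explicit `off_t offset` argument, while earlier versions used the file's seek position. The `FileCompat` type and helpers used above are not part of this diff; a plausible sketch, assuming they track the offset on the caller's side:

```c
/* hypothetical sketch of the FileCompat helpers referenced in this diff */
typedef struct FileCompat
{
	File fd;      /* underlying virtual file descriptor */
	off_t offset; /* next read/write position, consulted on PG12+ */
} FileCompat;

static inline FileCompat
FileCompatFromFileStart(File fileDesc)
{
	FileCompat fileCompat = { fileDesc, 0 };
	return fileCompat;
}

static inline int
FileWriteCompat(FileCompat *file, char *buffer, int amount, uint32 wait_event_info)
{
#if PG_VERSION_NUM >= 120000
	int count = FileWrite(file->fd, buffer, amount, file->offset, wait_event_info);
	if (count > 0)
	{
		file->offset += count;
	}
	return count;
#else
	return FileWrite(file->fd, buffer, amount, wait_event_info);
#endif
}
```

`FileReadCompat` would mirror `FileWriteCompat`, calling `FileRead` instead.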


@@ -194,7 +194,7 @@ multi_ProcessUtility(PlannedStmt *pstmt,
 
     /*
      * TRANSMIT used to be separate command, but to avoid patching the grammar
-     * it's no overlaid onto COPY, but with FORMAT = 'transmit' instead of the
+     * it's now overlaid onto COPY, but with FORMAT = 'transmit' instead of the
      * normal FORMAT options.
      */
     if (IsTransmitStmt(parsetree))


@@ -9,8 +9,11 @@
  */
 
 #include "postgres.h"
-#include "c.h"
 
+#if PG_VERSION_NUM >= 120000
+#include "commands/defrem.h"
+#endif
+#include "commands/vacuum.h"
 #include "distributed/commands.h"
 #include "distributed/commands/utility_hook.h"
 #include "distributed/metadata_cache.h"
@@ -22,13 +25,26 @@
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
 
+
+/*
+ * Subset of VacuumParams we care about
+ */
+typedef struct CitusVacuumParams
+{
+    int options;
+#if PG_VERSION_NUM >= 120000
+    VacOptTernaryValue truncate;
+    VacOptTernaryValue index_cleanup;
+#endif
+} CitusVacuumParams;
+
 /* Local functions forward declarations for processing distributed table commands */
-static bool IsDistributedVacuumStmt(VacuumStmt *vacuumStmt, List *vacuumRelationIdList);
-static List * VacuumTaskList(Oid relationId, int vacuumOptions, List *vacuumColumnList);
-static StringInfo DeparseVacuumStmtPrefix(int vacuumFlags);
+static bool IsDistributedVacuumStmt(int vacuumOptions, List *vacuumRelationIdList);
+static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams,
+                             List *vacuumColumnList);
+static StringInfo DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams);
 static char * DeparseVacuumColumnNames(List *columnNameList);
+static CitusVacuumParams VacuumStmtParams(VacuumStmt *vacstmt);
 
 /*
  * ProcessVacuumStmt processes vacuum statements that may need propagation to
@@ -49,7 +65,8 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
     ListCell *vacuumRelationCell = NULL;
     List *relationIdList = NIL;
     ListCell *relationIdCell = NULL;
-    LOCKMODE lockMode = (vacuumStmt->options & VACOPT_FULL) ? AccessExclusiveLock :
+    CitusVacuumParams vacuumParams = VacuumStmtParams(vacuumStmt);
+    LOCKMODE lockMode = (vacuumParams.options & VACOPT_FULL) ? AccessExclusiveLock :
                         ShareUpdateExclusiveLock;
     int executedVacuumCount = 0;
@@ -60,7 +77,7 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
         relationIdList = lappend_oid(relationIdList, relationId);
     }
 
-    distributedVacuumStmt = IsDistributedVacuumStmt(vacuumStmt, relationIdList);
+    distributedVacuumStmt = IsDistributedVacuumStmt(vacuumParams.options, relationIdList);
     if (!distributedVacuumStmt)
     {
         return;
@@ -81,7 +98,7 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
          * commands can run inside a transaction block. Notice that we do this
          * once even if there are multiple distributed tables to be vacuumed.
          */
-        if (executedVacuumCount == 0 && (vacuumStmt->options & VACOPT_VACUUM) != 0)
+        if (executedVacuumCount == 0 && (vacuumParams.options & VACOPT_VACUUM) != 0)
         {
             /* save old commit protocol to restore at xact end */
             Assert(SavedMultiShardCommitProtocol == COMMIT_PROTOCOL_BARE);
@@ -90,7 +107,7 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
         }
 
         vacuumColumnList = VacuumColumnList(vacuumStmt, relationIndex);
-        taskList = VacuumTaskList(relationId, vacuumStmt->options, vacuumColumnList);
+        taskList = VacuumTaskList(relationId, vacuumParams, vacuumColumnList);
 
         /* use adaptive executor when enabled */
         ExecuteUtilityTaskListWithoutResults(taskList);
@@ -110,9 +127,9 @@ ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
  * false otherwise.
  */
 static bool
-IsDistributedVacuumStmt(VacuumStmt *vacuumStmt, List *vacuumRelationIdList)
+IsDistributedVacuumStmt(int vacuumOptions, List *vacuumRelationIdList)
 {
-    const char *stmtName = (vacuumStmt->options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE";
+    const char *stmtName = (vacuumOptions & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE";
     bool distributeStmt = false;
     ListCell *relationIdCell = NULL;
     int distributedRelationCount = 0;
@@ -166,14 +183,14 @@ IsDistributedVacuumStmt(VacuumStmt *vacuumStmt, List *vacuumRelationIdList)
  * a VacuumStmt which targets a distributed relation.
  */
 static List *
-VacuumTaskList(Oid relationId, int vacuumOptions, List *vacuumColumnList)
+VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColumnList)
 {
     List *taskList = NIL;
     List *shardIntervalList = NIL;
     ListCell *shardIntervalCell = NULL;
     uint64 jobId = INVALID_JOB_ID;
     int taskId = 1;
-    StringInfo vacuumString = DeparseVacuumStmtPrefix(vacuumOptions);
+    StringInfo vacuumString = DeparseVacuumStmtPrefix(vacuumParams);
     const char *columnNames = NULL;
     const int vacuumPrefixLen = vacuumString->len;
     Oid schemaId = get_rel_namespace(relationId);
@@ -233,18 +250,12 @@ VacuumTaskList(Oid relationId, int vacuumOptions, List *vacuumColumnList)
  * statements.
  */
 static StringInfo
-DeparseVacuumStmtPrefix(int vacuumFlags)
+DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
 {
+    int vacuumFlags = vacuumParams.options;
     StringInfo vacuumPrefix = makeStringInfo();
-    const int unsupportedFlags PG_USED_FOR_ASSERTS_ONLY = ~(
-        VACOPT_ANALYZE |
-        VACOPT_DISABLE_PAGE_SKIPPING |
-        VACOPT_FREEZE |
-        VACOPT_FULL |
-        VACOPT_VERBOSE
-        );
 
-    /* determine actual command and block out its bit */
+    /* determine actual command and block out its bits */
     if (vacuumFlags & VACOPT_VACUUM)
     {
         appendStringInfoString(vacuumPrefix, "VACUUM ");
@@ -252,6 +263,8 @@ DeparseVacuumStmtPrefix(int vacuumFlags)
     }
     else
     {
+        Assert((vacuumFlags & VACOPT_ANALYZE) != 0);
+
         appendStringInfoString(vacuumPrefix, "ANALYZE ");
         vacuumFlags &= ~VACOPT_ANALYZE;
@@ -262,11 +275,13 @@ DeparseVacuumStmtPrefix(int vacuumFlags)
         }
     }
 
-    /* unsupported flags should have already been rejected */
-    Assert((vacuumFlags & unsupportedFlags) == 0);
-
     /* if no flags remain, exit early */
-    if (vacuumFlags == 0)
+    if (vacuumFlags == 0
+#if PG_VERSION_NUM >= 120000
+        && vacuumParams.truncate == VACOPT_TERNARY_DEFAULT &&
+        vacuumParams.index_cleanup == VACOPT_TERNARY_DEFAULT
+#endif
+        )
     {
         return vacuumPrefix;
     }
@@ -299,6 +314,29 @@ DeparseVacuumStmtPrefix(int vacuumFlags)
         appendStringInfoString(vacuumPrefix, "VERBOSE,");
     }
 
+#if PG_VERSION_NUM >= 120000
+    if (vacuumFlags & VACOPT_SKIP_LOCKED)
+    {
+        appendStringInfoString(vacuumPrefix, "SKIP_LOCKED,");
+    }
+
+    if (vacuumParams.truncate != VACOPT_TERNARY_DEFAULT)
+    {
+        appendStringInfoString(vacuumPrefix,
+                               vacuumParams.truncate == VACOPT_TERNARY_ENABLED ?
+                               "TRUNCATE," : "TRUNCATE false,"
+                               );
+    }
+
+    if (vacuumParams.index_cleanup != VACOPT_TERNARY_DEFAULT)
+    {
+        appendStringInfoString(vacuumPrefix,
+                               vacuumParams.index_cleanup == VACOPT_TERNARY_ENABLED ?
+                               "INDEX_CLEANUP," : "INDEX_CLEANUP false,"
+                               );
+    }
+#endif
+
     vacuumPrefix->data[vacuumPrefix->len - 1] = ')';
     appendStringInfoChar(vacuumPrefix, ' ');
@@ -339,3 +377,108 @@ DeparseVacuumColumnNames(List *columnNameList)
 
     return columnNames->data;
 }
+
+
+/*
+ * VacuumStmtParams returns a CitusVacuumParams based on the supplied VacuumStmt.
+ */
+#if PG_VERSION_NUM >= 120000
+
+/*
+ * This is mostly ExecVacuum from Postgres's commands/vacuum.c
+ */
+static CitusVacuumParams
+VacuumStmtParams(VacuumStmt *vacstmt)
+{
+    CitusVacuumParams params;
+    bool verbose = false;
+    bool skip_locked = false;
+    bool analyze = false;
+    bool freeze = false;
+    bool full = false;
+    bool disable_page_skipping = false;
+    ListCell *lc;
+
+    /* Set default value */
+    params.index_cleanup = VACOPT_TERNARY_DEFAULT;
+    params.truncate = VACOPT_TERNARY_DEFAULT;
+
+    /* Parse options list */
+    foreach(lc, vacstmt->options)
+    {
+        DefElem *opt = (DefElem *) lfirst(lc);
+
+        /* Parse common options for VACUUM and ANALYZE */
+        if (strcmp(opt->defname, "verbose") == 0)
+        {
+            verbose = defGetBoolean(opt);
+        }
+        else if (strcmp(opt->defname, "skip_locked") == 0)
+        {
+            skip_locked = defGetBoolean(opt);
+        }
+        else if (!vacstmt->is_vacuumcmd)
+        {
+            ereport(ERROR,
+                    (errcode(ERRCODE_SYNTAX_ERROR),
+                     errmsg("unrecognized ANALYZE option \"%s\"", opt->defname)));
+        }
+
+        /* Parse options available on VACUUM */
+        else if (strcmp(opt->defname, "analyze") == 0)
+        {
+            analyze = defGetBoolean(opt);
+        }
+        else if (strcmp(opt->defname, "freeze") == 0)
+        {
+            freeze = defGetBoolean(opt);
+        }
+        else if (strcmp(opt->defname, "full") == 0)
+        {
+            full = defGetBoolean(opt);
+        }
+        else if (strcmp(opt->defname, "disable_page_skipping") == 0)
+        {
+            disable_page_skipping = defGetBoolean(opt);
+        }
+        else if (strcmp(opt->defname, "index_cleanup") == 0)
+        {
+            params.index_cleanup = defGetBoolean(opt) ? VACOPT_TERNARY_ENABLED :
+                                   VACOPT_TERNARY_DISABLED;
+        }
+        else if (strcmp(opt->defname, "truncate") == 0)
+        {
+            params.truncate = defGetBoolean(opt) ? VACOPT_TERNARY_ENABLED :
+                              VACOPT_TERNARY_DISABLED;
+        }
+        else
+        {
+            ereport(ERROR,
+                    (errcode(ERRCODE_SYNTAX_ERROR),
+                     errmsg("unrecognized VACUUM option \"%s\"", opt->defname)
+                     ));
+        }
+    }
+
+    params.options = (vacstmt->is_vacuumcmd ? VACOPT_VACUUM : VACOPT_ANALYZE) |
+                     (verbose ? VACOPT_VERBOSE : 0) |
+                     (skip_locked ? VACOPT_SKIP_LOCKED : 0) |
+                     (analyze ? VACOPT_ANALYZE : 0) |
+                     (freeze ? VACOPT_FREEZE : 0) |
+                     (full ? VACOPT_FULL : 0) |
+                     (disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0);
+    return params;
+}
+#else
+
+static CitusVacuumParams
+VacuumStmtParams(VacuumStmt *vacuumStmt)
+{
+    CitusVacuumParams params;
+    params.options = vacuumStmt->options;
+    return params;
+}
+#endif
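For illustration (not part of the diff): with these changes, a hypothetical `VACUUM (SKIP_LOCKED, TRUNCATE false) dist_table` on the coordinator would be parsed into a `CitusVacuumParams`, deparsed by `DeparseVacuumStmtPrefix` into the prefix `VACUUM (SKIP_LOCKED,TRUNCATE false) `, and then sent per shard, e.g. as `VACUUM (SKIP_LOCKED,TRUNCATE false) public.dist_table_102008` for an assumed shard ID 102008.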


@@ -1695,13 +1695,8 @@ RunDistributedExecution(DistributedExecution *execution)
         }
 
         /* wait for I/O events */
-#if (PG_VERSION_NUM >= 100000)
         eventCount = WaitEventSetWait(execution->waitEventSet, timeout, events,
                                       eventSetSize, WAIT_EVENT_CLIENT_READ);
-#else
-        eventCount = WaitEventSetWait(execution->waitEventSet, timeout, events,
-                                      eventSetSize);
-#endif
 
         /* process I/O events */
         for (; eventIndex < eventCount; eventIndex++)


@@ -162,6 +162,11 @@ CitusBeginScan(CustomScanState *node, EState *estate, int eflags)
     MarkCitusInitiatedCoordinatorBackend();
 
     scanState = (CitusScanState *) node;
+
+#if PG_VERSION_NUM >= 120000
+    ExecInitResultSlot(&scanState->customScanState.ss.ps, &TTSOpsMinimalTuple);
+#endif
+
     distributedPlan = scanState->distributedPlan;
     if (distributedPlan->modLevel == ROW_MODIFY_READONLY ||
         distributedPlan->insertSelectSubquery != NULL)


@@ -27,6 +27,7 @@
 #include "distributed/remote_commands.h"
 #include "distributed/transmit.h"
 #include "distributed/transaction_identifier.h"
+#include "distributed/version_compat.h"
 #include "distributed/worker_protocol.h"
 #include "nodes/makefuncs.h"
 #include "nodes/parsenodes.h"
@@ -65,7 +66,7 @@ typedef struct RemoteFileDestReceiver
     /* whether to write to a local file */
     bool writeLocalFile;
-    File fileDesc;
+    FileCompat fileCompat;
 
     /* state on how to copy out data types */
     CopyOutState copyOutState;
@@ -79,7 +80,7 @@ typedef struct RemoteFileDestReceiver
 static void RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,
                                           TupleDesc inputTupleDescriptor);
 static StringInfo ConstructCopyResultStatement(const char *resultId);
-static void WriteToLocalFile(StringInfo copyData, File fileDesc);
+static void WriteToLocalFile(StringInfo copyData, FileCompat *fileCompat);
 static bool RemoteFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest);
 static void BroadcastCopyData(StringInfo dataBuffer, List *connectionList);
 static void SendCopyDataOverConnection(StringInfo dataBuffer,
@@ -263,7 +264,9 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,
         elog(DEBUG1, "writing to local file \"%s\"", fileName);
 
-        resultDest->fileDesc = FileOpenForTransmit(fileName, fileFlags, fileMode);
+        resultDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(fileName,
+                                                                             fileFlags,
+                                                                             fileMode));
     }
 
     foreach(initialNodeCell, initialNodeList)
@@ -329,7 +332,7 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,
 
     if (resultDest->writeLocalFile)
     {
-        WriteToLocalFile(copyOutState->fe_msgbuf, resultDest->fileDesc);
+        WriteToLocalFile(copyOutState->fe_msgbuf, &resultDest->fileCompat);
     }
 }
@@ -394,7 +397,7 @@ RemoteFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)
     /* write to local file (if applicable) */
     if (resultDest->writeLocalFile)
     {
-        WriteToLocalFile(copyOutState->fe_msgbuf, resultDest->fileDesc);
+        WriteToLocalFile(copyOutState->fe_msgbuf, &resultDest->fileCompat);
     }
 
     MemoryContextSwitchTo(oldContext);
@@ -411,9 +414,11 @@ RemoteFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)
  * WriteToLocalFile writes the bytes in a StringInfo to a local file.
  */
 static void
-WriteToLocalFile(StringInfo copyData, File fileDesc)
+WriteToLocalFile(StringInfo copyData, FileCompat *fileCompat)
 {
-    int bytesWritten = FileWrite(fileDesc, copyData->data, copyData->len, PG_WAIT_IO);
+    int bytesWritten = FileWriteCompat(fileCompat, copyData->data,
+                                       copyData->len,
+                                       PG_WAIT_IO);
     if (bytesWritten < 0)
     {
         ereport(ERROR, (errcode_for_file_access(),
@@ -444,7 +449,7 @@ RemoteFileDestReceiverShutdown(DestReceiver *destReceiver)
 
         if (resultDest->writeLocalFile)
         {
-            WriteToLocalFile(copyOutState->fe_msgbuf, resultDest->fileDesc);
+            WriteToLocalFile(copyOutState->fe_msgbuf, &resultDest->fileCompat);
         }
     }
@@ -453,7 +458,7 @@ RemoteFileDestReceiverShutdown(DestReceiver *destReceiver)
 
     if (resultDest->writeLocalFile)
     {
-        FileClose(resultDest->fileDesc);
+        FileClose(resultDest->fileCompat.fd);
     }
 }


@@ -138,7 +138,9 @@ CitusExecutorRun(QueryDesc *queryDesc,
         EState *estate = queryDesc->estate;
 
         estate->es_processed = 0;
+#if PG_VERSION_NUM < 120000
         estate->es_lastoid = InvalidOid;
+#endif
 
         /* start and shutdown tuple receiver to simulate empty result */
         dest->rStartup(queryDesc->dest, CMD_SELECT, queryDesc->tupDesc);
@@ -351,8 +353,8 @@ ReadFileIntoTupleStore(char *fileName, char *copyFormat, TupleDesc tupleDescript
         ResetPerTupleExprContext(executorState);
         oldContext = MemoryContextSwitchTo(executorTupleContext);
 
-        nextRowFound = NextCopyFrom(copyState, executorExpressionContext,
-                                    columnValues, columnNulls, NULL);
+        nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
+                                          columnValues, columnNulls);
         if (!nextRowFound)
         {
             MemoryContextSwitchTo(oldContext);


@@ -706,7 +706,8 @@ SortTupleStore(CitusScanState *scanState)
     /* iterate over all the sorted tuples, add them to original tuplestore */
     while (true)
     {
-        TupleTableSlot *newSlot = MakeSingleTupleTableSlot(tupleDescriptor);
+        TupleTableSlot *newSlot = MakeSingleTupleTableSlotCompat(tupleDescriptor,
+                                                                 &TTSOpsMinimalTuple);
         bool found = tuplesort_gettupleslot(tuplesortstate, true, false, newSlot, NULL);
 
         if (!found)
View File

@@ -42,13 +42,21 @@
 #include "distributed/worker_protocol.h"
 #include "distributed/worker_transaction.h"
 #include "lib/stringinfo.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/nodeFuncs.h"
+#endif
 #include "nodes/nodes.h"
 #include "nodes/parsenodes.h"
 #include "nodes/pg_list.h"
 #include "nodes/primnodes.h"
-#include "nodes/relation.h"
 #include "optimizer/clauses.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#include "optimizer/optimizer.h"
+#else
+#include "nodes/relation.h"
 #include "optimizer/predtest.h"
+#endif
 #include "optimizer/restrictinfo.h"
 #include "storage/lock.h"
 #include "storage/lmgr.h"


@@ -15,6 +15,9 @@
 #include "libpq-fe.h"
 #include "miscadmin.h"
 
+#if PG_VERSION_NUM >= 120000
+#include "access/genam.h"
+#endif
 #include "access/htup_details.h"
 #include "access/sysattr.h"
 #include "access/xact.h"
@@ -27,11 +30,13 @@
 #include "commands/extension.h"
 #include "distributed/connection_management.h"
 #include "distributed/citus_nodes.h"
+#include "distributed/listutils.h"
 #include "distributed/master_metadata_utility.h"
 #include "distributed/master_protocol.h"
 #include "distributed/metadata_cache.h"
 #include "distributed/multi_join_order.h"
 #include "distributed/multi_logical_optimizer.h"
+#include "distributed/multi_physical_planner.h"
 #include "distributed/pg_dist_colocation.h"
 #include "distributed/pg_dist_partition.h"
 #include "distributed/pg_dist_shard.h"
@@ -53,7 +58,6 @@
 #include "utils/lsyscache.h"
 #include "utils/rel.h"
 #include "utils/syscache.h"
-#include "utils/tqual.h"
 
 /* Local functions forward declarations */
@@ -406,6 +410,35 @@ ErrorIfNotSuitableToGetSize(Oid relationId)
 }
 
 
+/*
+ * CompareShardPlacementsByWorker compares two shard placements by their
+ * worker node name and port.
+ */
+int
+CompareShardPlacementsByWorker(const void *leftElement, const void *rightElement)
+{
+    const ShardPlacement *leftPlacement = *((const ShardPlacement **) leftElement);
+    const ShardPlacement *rightPlacement = *((const ShardPlacement **) rightElement);
+
+    int nodeNameCmp = strncmp(leftPlacement->nodeName, rightPlacement->nodeName,
+                              WORKER_LENGTH);
+    if (nodeNameCmp != 0)
+    {
+        return nodeNameCmp;
+    }
+    else if (leftPlacement->nodePort > rightPlacement->nodePort)
+    {
+        return 1;
+    }
+    else if (leftPlacement->nodePort < rightPlacement->nodePort)
+    {
+        return -1;
+    }
+
+    return 0;
+}
+
+
 /*
  * TableShardReplicationFactor returns the current replication factor of the
  * given relation by looking into shard placements. It errors out if there
@@ -691,7 +724,7 @@ FinalizedShardPlacementList(uint64 shardId)
         }
     }
 
-    return finalizedPlacementList;
+    return SortList(finalizedPlacementList, CompareShardPlacementsByWorker);
 }


@@ -47,9 +47,13 @@
 #include "distributed/worker_protocol.h"
 #include "distributed/worker_transaction.h"
 #include "optimizer/clauses.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/predtest.h"
-#include "optimizer/restrictinfo.h"
 #include "optimizer/var.h"
+#endif
+#include "optimizer/restrictinfo.h"
 #include "nodes/makefuncs.h"
 #include "tcop/tcopprot.h"
 #include "utils/builtins.h"


@@ -60,7 +60,6 @@
 #include "utils/palloc.h"
 #include "utils/relcache.h"
 #include "utils/ruleutils.h"
-#include "utils/tqual.h"
 #include "utils/varlena.h"
@@ -472,7 +471,6 @@ master_get_active_worker_nodes(PG_FUNCTION_ARGS)
     MemoryContext oldContext = NULL;
     List *workerNodeList = NIL;
     TupleDesc tupleDescriptor = NULL;
-    bool hasOid = false;
 
     /* create a function context for cross-call persistence */
     functionContext = SRF_FIRSTCALL_INIT();
@@ -490,7 +488,11 @@ master_get_active_worker_nodes(PG_FUNCTION_ARGS)
      * This tuple descriptor must match the output parameters declared for
      * the function in pg_proc.
      */
-    tupleDescriptor = CreateTemplateTupleDesc(WORKER_NODE_FIELDS, hasOid);
+#if PG_VERSION_NUM < 120000
+    tupleDescriptor = CreateTemplateTupleDesc(WORKER_NODE_FIELDS, false);
+#else
+    tupleDescriptor = CreateTemplateTupleDesc(WORKER_NODE_FIELDS);
+#endif
     TupleDescInitEntry(tupleDescriptor, (AttrNumber) 1, "node_name",
                        TEXTOID, -1, 0);
     TupleDescInitEntry(tupleDescriptor, (AttrNumber) 2, "node_port",


@@ -84,7 +84,7 @@ worker_hash(PG_FUNCTION_ARGS)
     fmgr_info_copy(hashFunction, &(typeEntry->hash_proc_finfo), CurrentMemoryContext);
 
     /* calculate hash value */
-    hashedValueDatum = FunctionCall1(hashFunction, valueDatum);
+    hashedValueDatum = FunctionCall1Coll(hashFunction, PG_GET_COLLATION(), valueDatum);
 
     PG_RETURN_INT32(hashedValueDatum);
 }


@@ -55,7 +55,6 @@
 #include "utils/lsyscache.h"
 #include "utils/syscache.h"
 #include "utils/rel.h"
-#include "utils/tqual.h"
 
 /* Local functions forward declarations */


@@ -19,9 +19,7 @@
 #include "distributed/metadata_cache.h"
 #include "distributed/multi_client_executor.h"
 #include "libpq/hba.h"
-#if (PG_VERSION_NUM >= 100000)
 #include "common/ip.h"
-#endif
 #include "libpq/libpq-be.h"
 #include "postmaster/postmaster.h"
 #include "storage/fd.h"


@@ -48,7 +48,6 @@
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
 #include "utils/syscache.h"
-#include "utils/tqual.h"
 
 static char * LocalGroupIdUpdateCommand(int32 groupId);


@@ -203,7 +203,7 @@ UpdateRelationToShardNames(Node *node, List *relationShardList)
     if (IsA(node, Query))
     {
         return query_tree_walker((Query *) node, UpdateRelationToShardNames,
-                                 relationShardList, QTW_EXAMINE_RTES);
+                                 relationShardList, QTW_EXAMINE_RTES_BEFORE);
     }
 
     if (!IsA(node, RangeTblEntry))
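PG12 split `QTW_EXAMINE_RTES` into `QTW_EXAMINE_RTES_BEFORE` and `QTW_EXAMINE_RTES_AFTER`, which control whether a range table entry is visited before or after its contents. To keep call sites like the one above uniform, the pre-12 builds presumably alias the new name onto the old flag, along these lines:

```c
/* hypothetical sketch: alias the PG12 flag name on older versions */
#if PG_VERSION_NUM < 120000
#define QTW_EXAMINE_RTES_BEFORE QTW_EXAMINE_RTES
#endif
```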


@@ -35,7 +35,12 @@
 #include "nodes/nodeFuncs.h"
 #include "parser/parsetree.h"
 #include "parser/parse_type.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#include "optimizer/plancat.h"
+#else
 #include "optimizer/cost.h"
+#endif
 #include "optimizer/pathnode.h"
 #include "optimizer/planner.h"
 #include "utils/builtins.h"
@@ -1327,6 +1332,11 @@ AdjustReadIntermediateResultCost(RangeTblEntry *rangeTableEntry, RelOptInfo *rel
     double rowSizeEstimate = 0;
     double rowCountEstimate = 0.;
     double ioCost = 0.;
+#if PG_VERSION_NUM >= 120000
+    QualCost funcCost = { 0., 0. };
+#else
+    double funcCost = 0.;
+#endif
 
     if (rangeTableEntry->rtekind != RTE_FUNCTION ||
         list_length(rangeTableEntry->functions) != 1)
@@ -1413,9 +1423,19 @@ AdjustReadIntermediateResultCost(RangeTblEntry *rangeTableEntry, RelOptInfo *rel
             rowSizeEstimate += 1;
         }
 
         /* add the cost of parsing a column */
-        rowCost += get_func_cost(inputFunctionId) * cpu_operator_cost;
+#if PG_VERSION_NUM >= 120000
+        add_function_cost(NULL, inputFunctionId, NULL, &funcCost);
+#else
+        funcCost += get_func_cost(inputFunctionId);
+#endif
     }
 
+#if PG_VERSION_NUM >= 120000
+    rowCost += funcCost.per_tuple;
+#else
+    rowCost += funcCost * cpu_operator_cost;
+#endif
+
     /* estimate the number of rows based on the file size and estimated row size */
     rowCountEstimate = Max(1, (double) resultSize / rowSizeEstimate);
@@ -1429,6 +1449,10 @@ AdjustReadIntermediateResultCost(RangeTblEntry *rangeTableEntry, RelOptInfo *rel
     path = (Path *) linitial(pathList);
     path->rows = rowCountEstimate;
     path->total_cost = rowCountEstimate * rowCost + ioCost;
+
+#if PG_VERSION_NUM >= 120000
+    path->startup_cost = funcCost.startup + relOptInfo->baserestrictcost.startup;
+#endif
 }


@@ -14,7 +14,12 @@
 #include "distributed/metadata_cache.h"
 #include "distributed/multi_logical_optimizer.h"
 #include "distributed/pg_dist_partition.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/var.h"
+#endif
+#include "optimizer/restrictinfo.h"
 #include "nodes/nodeFuncs.h"
 #include "nodes/pg_list.h"


@@ -41,10 +41,17 @@
 #include "distributed/pg_dist_partition.h"
 #include "distributed/shardinterval_utils.h"
 #include "distributed/shard_pruning.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/makefuncs.h"
+#endif
 #include "nodes/nodeFuncs.h"
 #include "nodes/parsenodes.h"
 #include "nodes/pg_list.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/clauses.h"
+#endif
 
 bool EnableFastPathRouterPlanner = true;


@@ -26,6 +26,7 @@
 #include "distributed/query_pushdown_planning.h"
 #include "distributed/recursive_planning.h"
 #include "distributed/resource_lock.h"
+#include "distributed/version_compat.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
 #include "nodes/parsenodes.h"
@@ -33,7 +34,11 @@
 #include "optimizer/planner.h"
 #include "optimizer/restrictinfo.h"
 #include "optimizer/tlist.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/var.h"
+#endif
 #include "parser/parsetree.h"
 #include "parser/parse_coerce.h"
 #include "parser/parse_relation.h"
@@ -765,7 +770,7 @@ MultiTaskRouterSelectQuerySupported(Query *query)
         Assert(subquery->commandType == CMD_SELECT);
 
         /* pushing down rtes without relations yields (shardCount * expectedRows) */
-        if (subquery->rtable == NIL)
+        if (HasEmptyJoinTree(subquery))
         {
             return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
                                  "Subqueries without relations are not allowed in "


@@ -24,7 +24,12 @@
 #include "distributed/pg_dist_partition.h"
 #include "distributed/worker_protocol.h"
 #include "lib/stringinfo.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/var.h"
+#endif
+#include "utils/builtins.h"
 #include "nodes/nodeFuncs.h"
 #include "utils/builtins.h"
 #include "utils/datum.h"


@@ -35,12 +35,17 @@
 #include "distributed/multi_physical_planner.h"
 #include "distributed/pg_dist_partition.h"
 #include "distributed/worker_protocol.h"
+#include "distributed/version_compat.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
 #include "nodes/print.h"
 #include "optimizer/clauses.h"
 #include "optimizer/tlist.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/var.h"
+#endif
 #include "parser/parse_agg.h"
 #include "parser/parse_coerce.h"
 #include "parser/parse_oper.h"
@@ -49,7 +54,6 @@
 #include "utils/lsyscache.h"
 #include "utils/rel.h"
 #include "utils/syscache.h"
-#include "utils/tqual.h"
 
 /* Config variable managed via guc.c */
@@ -2962,7 +2966,11 @@ AggregateFunctionOid(const char *functionName, Oid inputType)
         /* check if input type and found value type match */
         if (procForm->proargtypes.values[0] == inputType)
         {
+#if PG_VERSION_NUM < 120000
             functionOid = HeapTupleGetOid(heapTuple);
+#else
+            functionOid = procForm->oid;
+#endif
             break;
         }
     }
@@ -2992,8 +3000,9 @@ TypeOid(Oid schemaId, const char *typeName)
 {
     Oid typeOid;
 
-    typeOid = GetSysCacheOid2(TYPENAMENSP, PointerGetDatum(typeName),
-                              ObjectIdGetDatum(schemaId));
+    typeOid = GetSysCacheOid2Compat(TYPENAMENSP, Anum_pg_type_oid, PointerGetDatum(
+                                        typeName),
+                                    ObjectIdGetDatum(schemaId));
 
     return typeOid;
 }
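In PG12 the hidden `oid` system column became an ordinary column, so `HeapTupleGetOid` disappeared (handled inline in `AggregateFunctionOid` above) and `GetSysCacheOid*` gained an `Anum_*_oid` argument naming the OID column. A plausible sketch of the `GetSysCacheOid2Compat` wrapper, assuming a macro that drops the extra argument pre-12:

```c
/* hypothetical sketch: the oidcol argument only exists on PG12+ */
#if PG_VERSION_NUM >= 120000
#define GetSysCacheOid2Compat(cacheId, oidcol, key1, key2) \
	GetSysCacheOid2(cacheId, oidcol, key1, key2)
#else
#define GetSysCacheOid2Compat(cacheId, oidcol, key1, key2) \
	GetSysCacheOid2(cacheId, key1, key2)
#endif
```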


@@ -33,11 +33,16 @@
 #include "distributed/version_compat.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#include "optimizer/optimizer.h"
+#else
 #include "nodes/relation.h"
+#include "optimizer/var.h"
+#endif
 #include "optimizer/clauses.h"
 #include "optimizer/prep.h"
 #include "optimizer/tlist.h"
-#include "optimizer/var.h"
 #include "parser/parsetree.h"
 #include "utils/datum.h"
 #include "utils/lsyscache.h"
@@ -150,7 +155,7 @@ MultiLogicalPlanCreate(Query *originalQuery, Query *queryTree,
  * FindNodeCheck finds a node for which the check function returns true.
  *
  * To call this function directly with an RTE, use:
- * range_table_walker(rte, FindNodeCheck, check, QTW_EXAMINE_RTES)
+ * range_table_walker(rte, FindNodeCheck, check, QTW_EXAMINE_RTES_BEFORE)
  */
 bool
 FindNodeCheck(Node *node, bool (*check)(Node *))
@@ -172,7 +177,8 @@ FindNodeCheck(Node *node, bool (*check)(Node *))
     }
     else if (IsA(node, Query))
     {
-        return query_tree_walker((Query *) node, FindNodeCheck, check, QTW_EXAMINE_RTES);
+        return query_tree_walker((Query *) node, FindNodeCheck, check,
+                                 QTW_EXAMINE_RTES_BEFORE);
     }
 
     return expression_tree_walker(node, FindNodeCheck, check);
@ -380,7 +386,7 @@ AllTargetExpressionsAreColumnReferences(List *targetEntryList)
bool bool
FindNodeCheckInRangeTableList(List *rtable, bool (*check)(Node *)) FindNodeCheckInRangeTableList(List *rtable, bool (*check)(Node *))
{ {
return range_table_walker(rtable, FindNodeCheck, check, QTW_EXAMINE_RTES); return range_table_walker(rtable, FindNodeCheck, check, QTW_EXAMINE_RTES_BEFORE);
} }
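PostgreSQL 12 split the QTW_EXAMINE_RTES walker flag into QTW_EXAMINE_RTES_BEFORE and QTW_EXAMINE_RTES_AFTER, which control whether the callback sees a range table entry before or after its contents are walked. On PG 10/11 the new spelling can simply map back to the old flag; a plausible shim in the spirit of version_compat.h (not quoted from this commit):

#if PG_VERSION_NUM < 120000
/* PG12 split QTW_EXAMINE_RTES into _BEFORE/_AFTER; older servers had one flag */
#define QTW_EXAMINE_RTES_BEFORE QTW_EXAMINE_RTES
#define QTW_EXAMINE_RTES_AFTER QTW_EXAMINE_RTES
#endif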
@@ -1992,7 +1998,8 @@ ExtractRangeTableRelationWalker(Node *node, List **rangeTableRelationList)
 	{
 		walkIsComplete = query_tree_walker((Query *) node,
 										   ExtractRangeTableRelationWalker,
-										   rangeTableRelationList, QTW_EXAMINE_RTES);
+										   rangeTableRelationList,
+										   QTW_EXAMINE_RTES_BEFORE);
 	}
 	else
 	{
@@ -2040,7 +2047,7 @@ ExtractRangeTableEntryWalker(Node *node, List **rangeTableList)
 		walkIsComplete = query_tree_walker((Query *) node,
 										   ExtractRangeTableEntryWalker,
 										   rangeTableList,
-										   QTW_EXAMINE_RTES);
+										   QTW_EXAMINE_RTES_BEFORE);
 	}
 	else
 	{
@@ -2048,7 +2055,7 @@ ExtractRangeTableEntryWalker(Node *node, List **rangeTableList)
 		walkIsComplete = range_table_walker(query->rtable,
 											ExtractRangeTableEntryWalker,
 											rangeTableList,
-											QTW_EXAMINE_RTES);
+											QTW_EXAMINE_RTES_BEFORE);
 	}
 }
 else

View File

@@ -21,6 +21,7 @@
 #include "distributed/multi_physical_planner.h"
 #include "distributed/distributed_planner.h"
 #include "distributed/multi_server_executor.h"
+#include "distributed/version_compat.h"
 #include "distributed/worker_protocol.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
@@ -29,7 +30,11 @@
 #include "optimizer/cost.h"
 #include "optimizer/planmain.h"
 #include "optimizer/tlist.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/var.h"
+#endif
 #include "utils/builtins.h"
 #include "utils/guc.h"
 #include "utils/memutils.h"
@@ -47,6 +52,8 @@ static bool UseGroupAggregateWithHLL(Query *masterQuery);
 static bool QueryContainsAggregateWithHLL(Query *query);
 static Plan * BuildDistinctPlan(Query *masterQuery, Plan *subPlan);
 static List * PrepareTargetListForNextPlan(List *targetList);
+static Agg * makeAggNode(List *groupClauseList, List *havingQual,
+						 AggStrategy aggrStrategy, List *queryTargetList, Plan *subPlan);
 /*
@@ -263,7 +270,6 @@ BuildAggregatePlan(Query *masterQuery, Plan *subPlan)
 	Agg *aggregatePlan = NULL;
 	AggStrategy aggregateStrategy = AGG_PLAIN;
 	AggClauseCosts aggregateCosts;
-	AttrNumber *groupColumnIdArray = NULL;
 	List *aggregateTargetList = NIL;
 	List *groupColumnList = NIL;
 	List *aggregateColumnList = NIL;
@@ -271,9 +277,7 @@ BuildAggregatePlan(Query *masterQuery, Plan *subPlan)
 	List *columnList = NIL;
 	ListCell *columnCell = NULL;
 	Node *havingQual = NULL;
-	Oid *groupColumnOpArray = NULL;
 	uint32 groupColumnCount = 0;
-	const long rowEstimate = 10;

 	/* assert that we need to build an aggregate plan */
 	Assert(masterQuery->hasAggs || masterQuery->groupClause);
@@ -349,17 +353,11 @@ BuildAggregatePlan(Query *masterQuery, Plan *subPlan)
 		{
 			aggregateStrategy = AGG_HASHED;
 		}
-
-		/* get column indexes that are being grouped */
-		groupColumnIdArray = extract_grouping_cols(groupColumnList, subPlan->targetlist);
-		groupColumnOpArray = extract_grouping_ops(groupColumnList);
 	}

 	/* finally create the plan */
-	aggregatePlan = make_agg(aggregateTargetList, (List *) havingQual, aggregateStrategy,
-							 AGGSPLIT_SIMPLE, groupColumnCount, groupColumnIdArray,
-							 groupColumnOpArray, NIL, NIL,
-							 rowEstimate, subPlan);
+	aggregatePlan = makeAggNode(groupColumnList, (List *) havingQual,
+								aggregateStrategy, aggregateTargetList, subPlan);

 	/* just for reproducible costs between different PostgreSQL versions */
 	aggregatePlan->plan.startup_cost = 0;
@@ -523,17 +521,8 @@ BuildDistinctPlan(Query *masterQuery, Plan *subPlan)
 	if (enable_hashagg && distinctClausesHashable && !hasDistinctAggregate)
 	{
-		const long rowEstimate = 10;	/* using the same value as BuildAggregatePlan() */
-		AttrNumber *distinctColumnIdArray = extract_grouping_cols(distinctClauseList,
-																  subPlan->targetlist);
-		Oid *distinctColumnOpArray = extract_grouping_ops(distinctClauseList);
-		uint32 distinctClauseCount = list_length(distinctClauseList);
-
-		distinctPlan = (Plan *) make_agg(targetList, NIL, AGG_HASHED,
-										 AGGSPLIT_SIMPLE, distinctClauseCount,
-										 distinctColumnIdArray,
-										 distinctColumnOpArray, NIL, NIL,
-										 rowEstimate, subPlan);
+		distinctPlan = (Plan *) makeAggNode(distinctClauseList, NIL, AGG_HASHED,
+											targetList, subPlan);
 	}
 	else
 	{
@@ -577,3 +566,36 @@ PrepareTargetListForNextPlan(List *targetList)
 	return newtargetList;
 }
+
+
+/*
+ * makeAggNode creates an "Agg" plan node. groupClauseList is a list of
+ * SortGroupClause's.
+ */
+static Agg *
+makeAggNode(List *groupClauseList, List *havingQual, AggStrategy aggrStrategy,
+			List *queryTargetList, Plan *subPlan)
+{
+	Agg *aggNode = NULL;
+	int groupColumnCount = list_length(groupClauseList);
+	AttrNumber *groupColumnIdArray =
+		extract_grouping_cols(groupClauseList, subPlan->targetlist);
+	Oid *groupColumnOpArray = extract_grouping_ops(groupClauseList);
+	const int rowEstimate = 10;
+
+#if (PG_VERSION_NUM >= 120000)
+	aggNode = make_agg(queryTargetList, havingQual, aggrStrategy,
+					   AGGSPLIT_SIMPLE, groupColumnCount, groupColumnIdArray,
+					   groupColumnOpArray,
+					   extract_grouping_collations(groupClauseList,
+												   subPlan->targetlist),
+					   NIL, NIL, rowEstimate, subPlan);
+#else
+	aggNode = make_agg(queryTargetList, havingQual, aggrStrategy,
+					   AGGSPLIT_SIMPLE, groupColumnCount, groupColumnIdArray,
+					   groupColumnOpArray,
+					   NIL, NIL, rowEstimate, subPlan);
+#endif
+
+	return aggNode;
+}
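The only version-specific difference in the helper above is the grouping-collations array that PostgreSQL 12 threads through to the executor. For reference, PG 12's upstream prototype is roughly the following (paraphrased from memory of optimizer/planmain.h, not part of this diff):

/* PostgreSQL 12's make_agg, roughly; grpCollations is the new argument */
Agg * make_agg(List *tlist, List *qual,
			   AggStrategy aggstrategy, AggSplit aggsplit,
			   int numGroupCols, AttrNumber *grpColIdx,
			   Oid *grpOperators, Oid *grpCollations,
			   List *groupingSets, List *chain,
			   double dNumGroups, Plan *lefttree);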

View File

@@ -54,9 +54,14 @@
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
 #include "optimizer/clauses.h"
-#include "optimizer/predtest.h"
-#include "optimizer/restrictinfo.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#include "optimizer/optimizer.h"
+#else
+#include "nodes/relation.h"
 #include "optimizer/var.h"
+#endif
+#include "optimizer/restrictinfo.h"
 #include "parser/parse_relation.h"
 #include "parser/parsetree.h"
 #include "utils/builtins.h"

View File

@@ -11,7 +11,6 @@
  */
 #include "postgres.h"

-#include "c.h"

 #include <stddef.h>
@@ -57,9 +56,13 @@
 #include "optimizer/joininfo.h"
 #include "optimizer/pathnode.h"
 #include "optimizer/paths.h"
-#include "optimizer/predtest.h"
-#include "optimizer/restrictinfo.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/var.h"
+#include "optimizer/predtest.h"
+#endif
+#include "optimizer/restrictinfo.h"
 #include "parser/parsetree.h"
 #include "parser/parse_oper.h"
 #include "storage/lock.h"
@@ -676,7 +679,11 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer
 									 NULL, NULL);
 		}
 	}
-	else if (rangeTableEntry->rtekind == RTE_VALUES)
+	else if (rangeTableEntry->rtekind == RTE_VALUES
+#if PG_VERSION_NUM >= 120000
+			 || rangeTableEntry->rtekind == RTE_RESULT
+#endif
+			 )
 	{
 		/* do nothing, this type is supported */
 	}

View File

@@ -15,15 +15,77 @@
 #include "distributed/multi_master_planner.h"

 #include "nodes/plannodes.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/nodeFuncs.h"
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/tlist.h"
+#endif

 /*
  * make_unique_from_sortclauses creates and returns a unique node
  * from provided distinct clause list.
  * The function is copied from PostgreSQL's
  * src/backend/optimizer/plan/createplan.c.
- *
+ */
+#if PG_VERSION_NUM >= 120000
+
+/*
+ * distinctList is a list of SortGroupClauses, identifying the targetlist items
+ * that should be considered by the Unique filter. The input path must
+ * already be sorted accordingly.
+ */
+Unique *
+make_unique_from_sortclauses(Plan *lefttree, List *distinctList)
+{
+	Unique *node = makeNode(Unique);
+	Plan *plan = &node->plan;
+	int numCols = list_length(distinctList);
+	int keyno = 0;
+	AttrNumber *uniqColIdx;
+	Oid *uniqOperators;
+	Oid *uniqCollations;
+	ListCell *slitem;
+
+	plan->targetlist = lefttree->targetlist;
+	plan->qual = NIL;
+	plan->lefttree = lefttree;
+	plan->righttree = NULL;
+
+	/*
+	 * convert SortGroupClause list into arrays of attr indexes and equality
+	 * operators, as wanted by executor
+	 */
+	Assert(numCols > 0);
+	uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
+	uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols);
+	uniqCollations = (Oid *) palloc(sizeof(Oid) * numCols);
+
+	foreach(slitem, distinctList)
+	{
+		SortGroupClause *sortcl = (SortGroupClause *) lfirst(slitem);
+		TargetEntry *tle = get_sortgroupclause_tle(sortcl, plan->targetlist);
+
+		uniqColIdx[keyno] = tle->resno;
+		uniqOperators[keyno] = sortcl->eqop;
+		uniqCollations[keyno] = exprCollation((Node *) tle->expr);
+		Assert(OidIsValid(uniqOperators[keyno]));
+		keyno++;
+	}
+
+	node->numCols = numCols;
+	node->uniqColIdx = uniqColIdx;
+	node->uniqOperators = uniqOperators;
+	node->uniqCollations = uniqCollations;
+
+	return node;
+}
+#else
+
+/*
  * distinctList is a list of SortGroupClauses, identifying the targetlist items
  * that should be considered by the Unique filter. The input path must
  * already be sorted accordingly.
@@ -69,3 +131,6 @@ make_unique_from_sortclauses(Plan *lefttree, List *distinctList)

 	return node;
 }
+
+#endif

View File

@@ -31,10 +31,16 @@
 #include "distributed/pg_dist_partition.h"
 #include "distributed/query_pushdown_planning.h"
 #include "distributed/relation_restriction_equivalence.h"
+#include "distributed/version_compat.h"
 #include "nodes/nodeFuncs.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/makefuncs.h"
+#include "optimizer/optimizer.h"
+#else
+#include "optimizer/var.h"
+#endif
 #include "nodes/pg_list.h"
 #include "optimizer/clauses.h"
-#include "optimizer/var.h"
 #include "parser/parsetree.h"
@@ -194,6 +200,32 @@ JoinTreeContainsSubquery(Query *query)
 }

+
+/*
+ * HasEmptyJoinTree returns whether the query selects from nothing, i.e. has
+ * an empty join tree.
+ */
+bool
+HasEmptyJoinTree(Query *query)
+{
+	if (query->rtable == NIL)
+	{
+		return true;
+	}
+#if PG_VERSION_NUM >= 120000
+	else if (list_length(query->rtable) == 1)
+	{
+		RangeTblEntry *rte = (RangeTblEntry *) linitial(query->rtable);
+		if (rte->rtekind == RTE_RESULT)
+		{
+			return true;
+		}
+	}
+#endif
+
+	return false;
+}
+
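For context: RTE_RESULT is new in PostgreSQL 12, where the planner can substitute a single RTE_RESULT range table entry for an empty FROM clause during preprocessing. That is why the rtable == NIL test alone no longer identifies FROM-less queries on 12, and it is the same reason RTE_RESULT appears next to RTE_VALUES and RTE_SUBQUERY in the accept branches elsewhere in this diff.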
 /*
  * JoinTreeContainsSubqueryWalker returns true if the input joinTreeNode
  * references a subquery. Otherwise, recurses into the expression.
@@ -651,7 +683,7 @@ FromClauseRecurringTupleType(Query *queryTree)
 {
 	RecurringTuplesType recurType = RECURRING_TUPLES_INVALID;

-	if (queryTree->rtable == NIL)
+	if (HasEmptyJoinTree(queryTree))
 	{
 		return RECURRING_TUPLES_EMPTY_JOIN_TREE;
 	}
@@ -817,7 +849,7 @@ DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLi
 		return deferredError;
 	}

-	if (subqueryTree->rtable == NIL &&
+	if (HasEmptyJoinTree(subqueryTree) &&
 		contain_mutable_functions((Node *) subqueryTree->targetList))
 	{
 		preconditionsSatisfied = false;
@@ -1009,7 +1041,11 @@ DeferErrorIfUnsupportedTableCombination(Query *queryTree)
 		 * subquery, or immutable function.
 		 */
 		if (rangeTableEntry->rtekind == RTE_RELATION ||
-			rangeTableEntry->rtekind == RTE_SUBQUERY)
+			rangeTableEntry->rtekind == RTE_SUBQUERY
+#if PG_VERSION_NUM >= 120000
+			|| rangeTableEntry->rtekind == RTE_RESULT
+#endif
+			)
 		{
 			/* accepted */
 		}
@@ -1332,7 +1368,7 @@ static bool
 IsRecurringRangeTable(List *rangeTable, RecurringTuplesType *recurType)
 {
 	return range_table_walker(rangeTable, HasRecurringTuples, recurType,
-							  QTW_EXAMINE_RTES);
+							  QTW_EXAMINE_RTES_BEFORE);
 }
@@ -1388,6 +1424,13 @@ HasRecurringTuples(Node *node, RecurringTuplesType *recurType)
 		 */
 			return true;
 		}
+#if PG_VERSION_NUM >= 120000
+		else if (rangeTableEntry->rtekind == RTE_RESULT)
+		{
+			*recurType = RECURRING_TUPLES_EMPTY_JOIN_TREE;
+			return true;
+		}
+#endif

 		return false;
 	}
@@ -1395,7 +1438,7 @@ HasRecurringTuples(Node *node, RecurringTuplesType *recurType)
 	{
 		Query *query = (Query *) node;

-		if (query->rtable == NIL)
+		if (HasEmptyJoinTree(query))
 		{
 			*recurType = RECURRING_TUPLES_EMPTY_JOIN_TREE;
@@ -1407,7 +1450,7 @@ HasRecurringTuples(Node *node, RecurringTuplesType *recurType)
 	}

 	return query_tree_walker((Query *) node, HasRecurringTuples,
-							 recurType, QTW_EXAMINE_RTES);
+							 recurType, QTW_EXAMINE_RTES_BEFORE);
 }

 return expression_tree_walker(node, HasRecurringTuples, recurType);

View File

@@ -78,7 +78,11 @@
 #include "nodes/nodes.h"
 #include "nodes/pg_list.h"
 #include "nodes/primnodes.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#else
 #include "nodes/relation.h"
+#endif
 #include "utils/builtins.h"
 #include "utils/guc.h"
@@ -1218,7 +1222,8 @@ CteReferenceListWalker(Node *node, CteReferenceWalkerContext *context)
 		Query *query = (Query *) node;

 		context->level += 1;
-		query_tree_walker(query, CteReferenceListWalker, context, QTW_EXAMINE_RTES);
+		query_tree_walker(query, CteReferenceListWalker, context,
+						  QTW_EXAMINE_RTES_BEFORE);
 		context->level -= 1;

 		return false;

View File

@@ -19,7 +19,11 @@
 #include "nodes/nodeFuncs.h"
 #include "nodes/pg_list.h"
 #include "nodes/primnodes.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#else
 #include "nodes/relation.h"
+#endif
 #include "parser/parsetree.h"
 #include "optimizer/pathnode.h"

View File

@@ -59,6 +59,7 @@
 #include "distributed/multi_physical_planner.h"
 #include "distributed/shardinterval_utils.h"
 #include "distributed/pg_dist_partition.h"
+#include "distributed/version_compat.h"
 #include "distributed/worker_protocol.h"
 #include "nodes/nodeFuncs.h"
 #include "nodes/makefuncs.h"
@@ -138,6 +139,17 @@ typedef struct PendingPruningInstance
 	Node *continueAt;
 } PendingPruningInstance;

+#if PG_VERSION_NUM >= 120000
+typedef union \
+{ \
+	FunctionCallInfoBaseData fcinfo; \
+	/* ensure enough space for nargs args is available */ \
+	char fcinfo_data[SizeForFunctionCallInfo(2)]; \
+} FunctionCall2InfoData;
+#else
+typedef FunctionCallInfoData FunctionCall2InfoData;
+typedef FunctionCallInfoData *FunctionCallInfo;
+#endif
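Background for the union above: PostgreSQL 12 turned FunctionCallInfoData's fixed arg/argnull arrays into a single flexible array of NullableDatum, so a caller that wants a struct-embedded two-argument call site now has to reserve SizeForFunctionCallInfo(2) bytes — the same trick fmgr.h's LOCAL_FCINFO macro uses. The fcSetArg() helper used in the rewritten code below is a version-compat shim; a sketch of how such a macro can be defined (assumed shape, not quoted from this commit):

#if PG_VERSION_NUM >= 120000
/* PG >= 12: arguments live in a flexible array of NullableDatum */
#define fcSetArg(fc, n, argValue) \
	(((fc)->args[n].isnull = false), ((fc)->args[n].value = (argValue)))
#else
/* PG < 12: parallel arg[] / argnull[] arrays */
#define fcSetArg(fc, n, argValue) \
	(((fc)->argnull[n] = false), ((fc)->arg[n] = (argValue)))
#endif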
 /*
  * Data necessary to perform a single PruneShards().
@@ -161,11 +173,11 @@ typedef struct ClauseWalkerContext

 	/*
 	 * Information about function calls we need to perform. Re-using the same
-	 * FunctionCallInfoData, instead of using FunctionCall2Coll, is often
+	 * FunctionCall2InfoData, instead of using FunctionCall2Coll, is often
 	 * cheaper.
 	 */
-	FunctionCallInfoData compareValueFunctionCall;
-	FunctionCallInfoData compareIntervalFunctionCall;
+	FunctionCall2InfoData compareValueFunctionCall;
+	FunctionCall2InfoData compareIntervalFunctionCall;
 } ClauseWalkerContext;

 static void PrunableExpressions(Node *originalNode, ClauseWalkerContext *context);
@@ -184,9 +196,9 @@ static void AddNewConjuction(ClauseWalkerContext *context, OpExpr *op);
 static PruningInstance * CopyPartialPruningInstance(PruningInstance *sourceInstance);
 static List * ShardArrayToList(ShardInterval **shardArray, int length);
 static List * DeepCopyShardIntervalList(List *originalShardIntervalList);
-static int PerformValueCompare(FunctionCallInfoData *compareFunctionCall, Datum a,
+static int PerformValueCompare(FunctionCallInfo compareFunctionCall, Datum a,
							   Datum b);
-static int PerformCompare(FunctionCallInfoData *compareFunctionCall);
+static int PerformCompare(FunctionCallInfo compareFunctionCall);

 static List * PruneOne(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context,
					   PruningInstance *prune);
@@ -201,11 +213,11 @@ static bool ExhaustivePruneOne(ShardInterval *curInterval,
							   PruningInstance *prune);
 static int UpperShardBoundary(Datum partitionColumnValue,
							  ShardInterval **shardIntervalCache,
-							  int shardCount, FunctionCallInfoData *compareFunction,
+							  int shardCount, FunctionCallInfo compareFunction,
							  bool includeMin);
 static int LowerShardBoundary(Datum partitionColumnValue,
							  ShardInterval **shardIntervalCache,
-							  int shardCount, FunctionCallInfoData *compareFunction,
+							  int shardCount, FunctionCallInfo compareFunction,
							  bool includeMax);
@@ -261,7 +273,8 @@ PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList,
 	if (cacheEntry->shardIntervalCompareFunction)
 	{
 		/* initiate function call info once (allows comparators to cache metadata) */
-		InitFunctionCallInfoData(context.compareIntervalFunctionCall,
+		InitFunctionCallInfoData(*(FunctionCallInfo) &context.compareIntervalFunctionCall,
								 cacheEntry->shardIntervalCompareFunction,
								 2, DEFAULT_COLLATION_OID, NULL, NULL);
 	}
@@ -274,7 +287,8 @@ PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList,
 	if (cacheEntry->shardColumnCompareFunction)
 	{
 		/* initiate function call info once (allows comparators to cache metadata) */
-		InitFunctionCallInfoData(context.compareValueFunctionCall,
+		InitFunctionCallInfoData(*(FunctionCallInfo) &context.compareValueFunctionCall,
								 cacheEntry->shardColumnCompareFunction,
								 2, DEFAULT_COLLATION_OID, NULL, NULL);
 	}
@@ -753,7 +767,8 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
 		case BTLessStrategyNumber:
 		{
 			if (!prune->lessConsts ||
-				PerformValueCompare(&context->compareValueFunctionCall,
+				PerformValueCompare((FunctionCallInfo) &context->compareValueFunctionCall,
									constantClause->constvalue,
									prune->lessConsts->constvalue) < 0)
 			{
@@ -766,7 +781,8 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
 		case BTLessEqualStrategyNumber:
 		{
 			if (!prune->lessEqualConsts ||
-				PerformValueCompare(&context->compareValueFunctionCall,
+				PerformValueCompare((FunctionCallInfo) &context->compareValueFunctionCall,
									constantClause->constvalue,
									prune->lessEqualConsts->constvalue) < 0)
 			{
@@ -782,7 +798,8 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
 			{
 				prune->equalConsts = constantClause;
 			}
-			else if (PerformValueCompare(&context->compareValueFunctionCall,
+			else if (PerformValueCompare((FunctionCallInfo) &context->compareValueFunctionCall,
										 constantClause->constvalue,
										 prune->equalConsts->constvalue) != 0)
 			{
@@ -796,7 +813,8 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
 		case BTGreaterEqualStrategyNumber:
 		{
 			if (!prune->greaterEqualConsts ||
-				PerformValueCompare(&context->compareValueFunctionCall,
+				PerformValueCompare((FunctionCallInfo) &context->compareValueFunctionCall,
									constantClause->constvalue,
									prune->greaterEqualConsts->constvalue) > 0
				)
 			{
@@ -810,7 +828,8 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
 		case BTGreaterStrategyNumber:
 		{
 			if (!prune->greaterConsts ||
-				PerformValueCompare(&context->compareValueFunctionCall,
+				PerformValueCompare((FunctionCallInfo) &context->compareValueFunctionCall,
									constantClause->constvalue,
									prune->greaterConsts->constvalue) > 0)
 			{
@@ -1133,7 +1152,7 @@ PruneOne(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context,
  * unexpected NULL returns.
  */
 static int
-PerformCompare(FunctionCallInfoData *compareFunctionCall)
+PerformCompare(FunctionCallInfo compareFunctionCall)
 {
 	Datum result = FunctionCallInvoke(compareFunctionCall);
@@ -1151,12 +1170,10 @@ PerformCompare(FunctionCallInfoData *compareFunctionCall)
  * NULL returns.
  */
 static int
-PerformValueCompare(FunctionCallInfoData *compareFunctionCall, Datum a, Datum b)
+PerformValueCompare(FunctionCallInfo compareFunctionCall, Datum a, Datum b)
 {
-	compareFunctionCall->arg[0] = a;
-	compareFunctionCall->argnull[0] = false;
-	compareFunctionCall->arg[1] = b;
-	compareFunctionCall->argnull[1] = false;
+	fcSetArg(compareFunctionCall, 0, a);
+	fcSetArg(compareFunctionCall, 1, b);

 	return PerformCompare(compareFunctionCall);
 }
@@ -1168,7 +1185,7 @@ PerformValueCompare(FunctionCallInfoData *compareFunctionCall, Datum a, Datum b)
  */
 static int
 LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCache,
-				   int shardCount, FunctionCallInfoData *compareFunction, bool includeMax)
+				   int shardCount, FunctionCallInfo compareFunction, bool includeMax)
 {
 	int lowerBoundIndex = 0;
 	int upperBoundIndex = shardCount;
@@ -1176,8 +1193,7 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 	Assert(shardCount != 0);

 	/* setup partitionColumnValue argument once */
-	compareFunction->arg[0] = partitionColumnValue;
-	compareFunction->argnull[0] = false;
+	fcSetArg(compareFunction, 0, partitionColumnValue);

 	while (lowerBoundIndex < upperBoundIndex)
 	{
@@ -1186,8 +1202,7 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 		int minValueComparison = 0;

 		/* setup minValue as argument */
-		compareFunction->arg[1] = shardIntervalCache[middleIndex]->minValue;
-		compareFunction->argnull[1] = false;
+		fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->minValue);

 		/* execute cmp(partitionValue, lowerBound) */
 		minValueComparison = PerformCompare(compareFunction);
@@ -1201,8 +1216,7 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 		}

 		/* setup maxValue as argument */
-		compareFunction->arg[1] = shardIntervalCache[middleIndex]->maxValue;
-		compareFunction->argnull[1] = false;
+		fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->maxValue);

 		/* execute cmp(partitionValue, upperBound) */
 		maxValueComparison = PerformCompare(compareFunction);
@@ -1249,7 +1263,7 @@ LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
  */
 static int
 UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCache,
-				   int shardCount, FunctionCallInfoData *compareFunction, bool includeMin)
+				   int shardCount, FunctionCallInfo compareFunction, bool includeMin)
 {
 	int lowerBoundIndex = 0;
 	int upperBoundIndex = shardCount;
@@ -1257,8 +1271,7 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 	Assert(shardCount != 0);

 	/* setup partitionColumnValue argument once */
-	compareFunction->arg[0] = partitionColumnValue;
-	compareFunction->argnull[0] = false;
+	fcSetArg(compareFunction, 0, partitionColumnValue);

 	while (lowerBoundIndex < upperBoundIndex)
 	{
@@ -1267,8 +1280,7 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 		int minValueComparison = 0;

 		/* setup minValue as argument */
-		compareFunction->arg[1] = shardIntervalCache[middleIndex]->minValue;
-		compareFunction->argnull[1] = false;
+		fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->minValue);

 		/* execute cmp(partitionValue, lowerBound) */
 		minValueComparison = PerformCompare(compareFunction);
@@ -1283,8 +1295,7 @@ UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCach
 		}

 		/* setup maxValue as argument */
-		compareFunction->arg[1] = shardIntervalCache[middleIndex]->maxValue;
-		compareFunction->argnull[1] = false;
+		fcSetArg(compareFunction, 1, shardIntervalCache[middleIndex]->maxValue);

 		/* execute cmp(partitionValue, upperBound) */
 		maxValueComparison = PerformCompare(compareFunction);
@@ -1345,7 +1356,8 @@ PruneWithBoundaries(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *contex
 	int lowerBoundIdx = -1;
 	int upperBoundIdx = -1;
 	int curIdx = 0;
-	FunctionCallInfo compareFunctionCall = &context->compareIntervalFunctionCall;
+	FunctionCallInfo compareFunctionCall =
+		(FunctionCallInfo) &context->compareIntervalFunctionCall;

 	if (prune->greaterEqualConsts)
 	{
@@ -1476,7 +1488,8 @@ ExhaustivePruneOne(ShardInterval *curInterval,
				   ClauseWalkerContext *context,
				   PruningInstance *prune)
 {
-	FunctionCallInfo compareFunctionCall = &context->compareIntervalFunctionCall;
+	FunctionCallInfo compareFunctionCall =
+		(FunctionCallInfo) &context->compareIntervalFunctionCall;
 	Datum compareWith = 0;

 	/* NULL boundaries can't be compared to */

View File

@@ -13,6 +13,7 @@
 #include "distributed/function_utils.h"
 #include "distributed/multi_progress.h"
+#include "distributed/version_compat.h"
 #include "storage/dsm.h"
 #include "utils/builtins.h"
@@ -155,7 +156,8 @@ ProgressMonitorList(uint64 commandTypeMagicNumber, List **attachedDSMSegments)
										  getProgressInfoFunctionOid,
										  commandTypeDatum);

-	tupleTableSlot = MakeSingleTupleTableSlot(progressResultSet->setDesc);
+	tupleTableSlot = MakeSingleTupleTableSlotCompat(progressResultSet->setDesc,
													&TTSOpsMinimalTuple);

 	/* iterate over tuples in tuple store, and send them to destination */
 	for (;;)
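PostgreSQL 12's MakeSingleTupleTableSlot grew a TupleTableSlotOps parameter: slots now carry their own implementation callbacks, and TTSOpsMinimalTuple is the right choice for tuples pulled from a tuplestore. A plausible compat wrapper in the spirit of version_compat.h (assumed shape, not quoted from this commit):

#if PG_VERSION_NUM >= 120000
#define MakeSingleTupleTableSlotCompat(tupleDesc, tts_ops) \
	MakeSingleTupleTableSlot(tupleDesc, tts_ops)
#else
/* pre-12 slots have a single built-in implementation; drop the ops argument */
#define MakeSingleTupleTableSlotCompat(tupleDesc, tts_ops) \
	MakeSingleTupleTableSlot(tupleDesc)
#endif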

View File

@@ -756,7 +756,7 @@ AppendShardIdToName(char **name, uint64 shardId)
 	if (neededBytes < 0)
 	{
 		ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY),
-						errmsg("out of memory: %s", strerror(errno))));
+						errmsg("out of memory: %m")));
 	}
 	else if (neededBytes >= NAMEDATALEN)
 	{

View File

@@ -0,0 +1,502 @@
/*-------------------------------------------------------------------------
*
* blackhole_am.c
* blackhole table access method code
*
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* Copied from https://github.com/michaelpq/pg_plugins/blob/master/blackhole_am/blackhole_am.c
*
*
* NOTES
* This file introduces the table access method blackhole, which can
* be used as a template for other table access methods, and guarantees
* that any data inserted into it gets sent to the void.
*
*-------------------------------------------------------------------------
*/
/* *INDENT-OFF* */
#include "postgres.h"
#if PG_VERSION_NUM >= 120000
#include "access/tableam.h"
#include "access/heapam.h"
#include "access/amapi.h"
#include "catalog/index.h"
#include "commands/vacuum.h"
#include "executor/tuptable.h"
PG_FUNCTION_INFO_V1(blackhole_am_handler);
/* Base structures for scans */
typedef struct BlackholeScanDescData
{
TableScanDescData rs_base; /* AM independent part of the descriptor */
/* Add more fields here as needed by the AM. */
} BlackholeScanDescData;
typedef struct BlackholeScanDescData *BlackholeScanDesc;
static const TableAmRoutine blackhole_methods;
/* ------------------------------------------------------------------------
* Slot related callbacks for blackhole AM
* ------------------------------------------------------------------------
*/
static const TupleTableSlotOps *
blackhole_slot_callbacks(Relation relation)
{
/*
* Here you would most likely want to invent your own set of
* slot callbacks for your AM.
*/
return &TTSOpsMinimalTuple;
}
/* ------------------------------------------------------------------------
* Table Scan Callbacks for blackhole AM
* ------------------------------------------------------------------------
*/
static TableScanDesc
blackhole_scan_begin(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key,
ParallelTableScanDesc parallel_scan,
uint32 flags)
{
BlackholeScanDesc scan;
scan = (BlackholeScanDesc) palloc(sizeof(BlackholeScanDescData));
scan->rs_base.rs_rd = relation;
scan->rs_base.rs_snapshot = snapshot;
scan->rs_base.rs_nkeys = nkeys;
scan->rs_base.rs_flags = flags;
scan->rs_base.rs_parallel = parallel_scan;
	return (TableScanDesc) scan;
}
static void
blackhole_scan_end(TableScanDesc sscan)
{
BlackholeScanDesc scan = (BlackholeScanDesc) sscan;
pfree(scan);
}
static void
blackhole_scan_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
bool allow_strat, bool allow_sync, bool allow_pagemode)
{
/* nothing to do */
}
static bool
blackhole_scan_getnextslot(TableScanDesc sscan, ScanDirection direction,
TupleTableSlot *slot)
{
/* nothing to do */
return false;
}
/* ------------------------------------------------------------------------
* Index Scan Callbacks for blackhole AM
* ------------------------------------------------------------------------
*/
static IndexFetchTableData *
blackhole_index_fetch_begin(Relation rel)
{
return NULL;
}
static void
blackhole_index_fetch_reset(IndexFetchTableData *scan)
{
/* nothing to do here */
}
static void
blackhole_index_fetch_end(IndexFetchTableData *scan)
{
/* nothing to do here */
}
static bool
blackhole_index_fetch_tuple(struct IndexFetchTableData *scan,
ItemPointer tid,
Snapshot snapshot,
TupleTableSlot *slot,
bool *call_again, bool *all_dead)
{
/* there is no data */
return 0;
}
/* ------------------------------------------------------------------------
* Callbacks for non-modifying operations on individual tuples for
* blackhole AM.
* ------------------------------------------------------------------------
*/
static bool
blackhole_fetch_row_version(Relation relation,
ItemPointer tid,
Snapshot snapshot,
TupleTableSlot *slot)
{
/* nothing to do */
return false;
}
static void
blackhole_get_latest_tid(TableScanDesc sscan,
ItemPointer tid)
{
/* nothing to do */
}
static bool
blackhole_tuple_tid_valid(TableScanDesc scan, ItemPointer tid)
{
return false;
}
static bool
blackhole_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
Snapshot snapshot)
{
return false;
}
static TransactionId
blackhole_compute_xid_horizon_for_tuples(Relation rel,
ItemPointerData *tids,
int nitems)
{
return InvalidTransactionId;
}
/* ----------------------------------------------------------------------------
* Functions for manipulations of physical tuples for blackhole AM.
* ----------------------------------------------------------------------------
*/
static void
blackhole_tuple_insert(Relation relation, TupleTableSlot *slot,
CommandId cid, int options, BulkInsertState bistate)
{
/* nothing to do */
}
static void
blackhole_tuple_insert_speculative(Relation relation, TupleTableSlot *slot,
CommandId cid, int options,
BulkInsertState bistate,
uint32 specToken)
{
/* nothing to do */
}
static void
blackhole_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
uint32 spekToken, bool succeeded)
{
/* nothing to do */
}
static void
blackhole_multi_insert(Relation relation, TupleTableSlot **slots,
int ntuples, CommandId cid, int options,
BulkInsertState bistate)
{
/* nothing to do */
}
static TM_Result
blackhole_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
Snapshot snapshot, Snapshot crosscheck, bool wait,
TM_FailureData *tmfd, bool changingPart)
{
/* nothing to do, so it is always OK */
return TM_Ok;
}
static TM_Result
blackhole_tuple_update(Relation relation, ItemPointer otid,
TupleTableSlot *slot, CommandId cid,
Snapshot snapshot, Snapshot crosscheck,
bool wait, TM_FailureData *tmfd,
LockTupleMode *lockmode, bool *update_indexes)
{
/* nothing to do, so it is always OK */
return TM_Ok;
}
static TM_Result
blackhole_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
LockWaitPolicy wait_policy, uint8 flags,
TM_FailureData *tmfd)
{
/* nothing to do, so it is always OK */
return TM_Ok;
}
static void
blackhole_finish_bulk_insert(Relation relation, int options)
{
/* nothing to do */
}
/* ------------------------------------------------------------------------
* DDL related callbacks for blackhole AM.
* ------------------------------------------------------------------------
*/
static void
blackhole_relation_set_new_filenode(Relation rel,
const RelFileNode *newrnode,
char persistence,
TransactionId *freezeXid,
MultiXactId *minmulti)
{
/* nothing to do */
}
static void
blackhole_relation_nontransactional_truncate(Relation rel)
{
/* nothing to do */
}
static void
blackhole_copy_data(Relation rel, const RelFileNode *newrnode)
{
/* there is no data */
}
static void
blackhole_copy_for_cluster(Relation OldTable, Relation NewTable,
Relation OldIndex, bool use_sort,
TransactionId OldestXmin,
TransactionId *xid_cutoff,
MultiXactId *multi_cutoff,
double *num_tuples,
double *tups_vacuumed,
double *tups_recently_dead)
{
/* no data, so nothing to do */
}
static void
blackhole_vacuum(Relation onerel, VacuumParams *params,
BufferAccessStrategy bstrategy)
{
/* no data, so nothing to do */
}
static bool
blackhole_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno,
BufferAccessStrategy bstrategy)
{
/* no data, so no point to analyze next block */
return false;
}
static bool
blackhole_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
double *liverows, double *deadrows,
TupleTableSlot *slot)
{
/* no data, so no point to analyze next tuple */
return false;
}
static double
blackhole_index_build_range_scan(Relation tableRelation,
Relation indexRelation,
IndexInfo *indexInfo,
bool allow_sync,
bool anyvisible,
bool progress,
BlockNumber start_blockno,
BlockNumber numblocks,
IndexBuildCallback callback,
void *callback_state,
TableScanDesc scan)
{
/* no data, so no tuples */
return 0;
}
static void
blackhole_index_validate_scan(Relation tableRelation,
Relation indexRelation,
IndexInfo *indexInfo,
Snapshot snapshot,
ValidateIndexState *state)
{
/* nothing to do */
}
/* ------------------------------------------------------------------------
* Miscellaneous callbacks for the blackhole AM
* ------------------------------------------------------------------------
*/
static uint64
blackhole_relation_size(Relation rel, ForkNumber forkNumber)
{
/* there is nothing */
return 0;
}
/*
* Check to see whether the table needs a TOAST table.
*/
static bool
blackhole_relation_needs_toast_table(Relation rel)
{
/* no data, so no toast table needed */
return false;
}
/* ------------------------------------------------------------------------
* Planner related callbacks for the blackhole AM
* ------------------------------------------------------------------------
*/
static void
blackhole_estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples,
double *allvisfrac)
{
/* no data available */
*attr_widths = 0;
*tuples = 0;
*allvisfrac = 0;
*pages = 0;
}
/* ------------------------------------------------------------------------
* Executor related callbacks for the blackhole AM
* ------------------------------------------------------------------------
*/
static bool
blackhole_scan_bitmap_next_block(TableScanDesc scan,
TBMIterateResult *tbmres)
{
/* no data, so no point to scan next block */
return false;
}
static bool
blackhole_scan_bitmap_next_tuple(TableScanDesc scan,
TBMIterateResult *tbmres,
TupleTableSlot *slot)
{
/* no data, so no point to scan next tuple */
return false;
}
static bool
blackhole_scan_sample_next_block(TableScanDesc scan,
SampleScanState *scanstate)
{
/* no data, so no point to scan next block for sampling */
return false;
}
static bool
blackhole_scan_sample_next_tuple(TableScanDesc scan,
SampleScanState *scanstate,
TupleTableSlot *slot)
{
/* no data, so no point to scan next tuple for sampling */
return false;
}
/* ------------------------------------------------------------------------
* Definition of the blackhole table access method.
* ------------------------------------------------------------------------
*/
static const TableAmRoutine blackhole_methods = {
.type = T_TableAmRoutine,
.slot_callbacks = blackhole_slot_callbacks,
.scan_begin = blackhole_scan_begin,
.scan_end = blackhole_scan_end,
.scan_rescan = blackhole_scan_rescan,
.scan_getnextslot = blackhole_scan_getnextslot,
/* these are common helper functions */
.parallelscan_estimate = table_block_parallelscan_estimate,
.parallelscan_initialize = table_block_parallelscan_initialize,
.parallelscan_reinitialize = table_block_parallelscan_reinitialize,
.index_fetch_begin = blackhole_index_fetch_begin,
.index_fetch_reset = blackhole_index_fetch_reset,
.index_fetch_end = blackhole_index_fetch_end,
.index_fetch_tuple = blackhole_index_fetch_tuple,
.tuple_insert = blackhole_tuple_insert,
.tuple_insert_speculative = blackhole_tuple_insert_speculative,
.tuple_complete_speculative = blackhole_tuple_complete_speculative,
.multi_insert = blackhole_multi_insert,
.tuple_delete = blackhole_tuple_delete,
.tuple_update = blackhole_tuple_update,
.tuple_lock = blackhole_tuple_lock,
.finish_bulk_insert = blackhole_finish_bulk_insert,
.tuple_fetch_row_version = blackhole_fetch_row_version,
.tuple_get_latest_tid = blackhole_get_latest_tid,
.tuple_tid_valid = blackhole_tuple_tid_valid,
.tuple_satisfies_snapshot = blackhole_tuple_satisfies_snapshot,
.compute_xid_horizon_for_tuples = blackhole_compute_xid_horizon_for_tuples,
.relation_set_new_filenode = blackhole_relation_set_new_filenode,
.relation_nontransactional_truncate = blackhole_relation_nontransactional_truncate,
.relation_copy_data = blackhole_copy_data,
.relation_copy_for_cluster = blackhole_copy_for_cluster,
.relation_vacuum = blackhole_vacuum,
.scan_analyze_next_block = blackhole_scan_analyze_next_block,
.scan_analyze_next_tuple = blackhole_scan_analyze_next_tuple,
.index_build_range_scan = blackhole_index_build_range_scan,
.index_validate_scan = blackhole_index_validate_scan,
.relation_size = blackhole_relation_size,
.relation_needs_toast_table = blackhole_relation_needs_toast_table,
.relation_estimate_size = blackhole_estimate_rel_size,
.scan_bitmap_next_block = blackhole_scan_bitmap_next_block,
.scan_bitmap_next_tuple = blackhole_scan_bitmap_next_tuple,
.scan_sample_next_block = blackhole_scan_sample_next_block,
.scan_sample_next_tuple = blackhole_scan_sample_next_tuple
};
Datum
blackhole_am_handler(PG_FUNCTION_ARGS)
{
PG_RETURN_POINTER(&blackhole_methods);
}
#endif
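A handler like this is wired up through the table access method DDL that PostgreSQL 12 introduced: a CREATE FUNCTION blackhole_am_handler(internal) RETURNS table_am_handler, then CREATE ACCESS METHOD blackhole TYPE TABLE HANDLER blackhole_am_handler, after which CREATE TABLE ... USING blackhole sends every inserted row to the void. Presumably the AM is vendored here so the regression tests can exercise Citus against a non-heap table access method.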

View File

@@ -40,11 +40,6 @@
 #include "utils/palloc.h"

-
-/* forward declaration of local functions */
-static int CompareShardPlacementsByWorker(const void *leftElement,
-										  const void *rightElement);
-
 /* declarations for dynamic loading */
 PG_FUNCTION_INFO_V1(load_shard_id_array);
 PG_FUNCTION_INFO_V1(load_shard_interval_array);
@@ -171,35 +166,6 @@ load_shard_placement_array(PG_FUNCTION_ARGS)
 }

-
-/*
- * CompareShardPlacementsByWorker compares two shard placements by their
- * worker node name and port.
- */
-static int
-CompareShardPlacementsByWorker(const void *leftElement, const void *rightElement)
-{
-	const ShardPlacement *leftPlacement = *((const ShardPlacement **) leftElement);
-	const ShardPlacement *rightPlacement = *((const ShardPlacement **) rightElement);
-
-	int nodeNameCmp = strncmp(leftPlacement->nodeName, rightPlacement->nodeName,
-							  WORKER_LENGTH);
-	if (nodeNameCmp != 0)
-	{
-		return nodeNameCmp;
-	}
-	else if (leftPlacement->nodePort > rightPlacement->nodePort)
-	{
-		return 1;
-	}
-	else if (leftPlacement->nodePort < rightPlacement->nodePort)
-	{
-		return -1;
-	}
-
-	return 0;
-}
-
 /*
  * partition_column_id simply finds a distributed table using the provided Oid
  * and returns the column_id of its partition column. If the specified table is

View File

@@ -22,7 +22,11 @@
 #include "nodes/nodes.h"
 #include "nodes/pg_list.h"
 #include "nodes/plannodes.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#else
 #include "nodes/relation.h"
+#endif
 #include "optimizer/pathnode.h"
 #include "optimizer/planmain.h"
 #include "optimizer/restrictinfo.h"

View File

@@ -25,9 +25,13 @@
 #include "distributed/multi_physical_planner.h"
 #include "distributed/resource_lock.h"
 #include "distributed/shard_pruning.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#endif
+#include "nodes/nodes.h"
 #include "nodes/pg_list.h"
 #include "nodes/primnodes.h"
-#include "nodes/nodes.h"
 #include "optimizer/clauses.h"
 #include "utils/array.h"
 #include "utils/palloc.h"

View File

@@ -19,6 +19,9 @@
 #include <sys/stat.h>
 #include <unistd.h>

+#if PG_VERSION_NUM >= 120000
+#include "access/genam.h"
+#endif
 #include "access/heapam.h"
 #include "access/htup_details.h"
 #include "access/relscan.h"

View File

@@ -309,10 +309,13 @@ GetRangeTblKind(RangeTblEntry *rte)
 		case RTE_JOIN:
 		case RTE_VALUES:
 		case RTE_CTE:
+#if PG_VERSION_NUM >= 120000
+		case RTE_RESULT:
+#endif
 		{
 			rteKind = (CitusRTEKind) rte->rtekind;
 			break;
 		}

 		case RTE_FUNCTION:
 		{

View File

@@ -30,7 +30,11 @@
 #include "distributed/master_metadata_utility.h"
 #include "lib/stringinfo.h"
 #include "nodes/plannodes.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#else
 #include "nodes/relation.h"
+#endif
 #include "utils/datum.h"

View File

@@ -45,9 +45,15 @@ CitusSetTag(Node *node, int tag)
	nodeTypeName *local_node = (nodeTypeName *) CitusSetTag((Node *) node, T_##nodeTypeName)

 /* And a few guys need only the pg_strtok support fields */
+#if PG_VERSION_NUM >= 120000
 #define READ_TEMP_LOCALS() \
-	char *token; \
+	const char *token; \
	int length
+#else
+#define READ_TEMP_LOCALS() \
+	char *token; \
+	int length
+#endif

 /* ... but most need both */
 #define READ_LOCALS(nodeTypeName) \

View File

@@ -128,7 +128,11 @@ get_extension_schema(Oid ext_oid)
 	rel = heap_open(ExtensionRelationId, AccessShareLock);

 	ScanKeyInit(&entry[0],
+#if PG_VERSION_NUM >= 120000
+				Anum_pg_extension_oid,
+#else
				ObjectIdAttributeNumber,
+#endif
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(ext_oid));

View File

@@ -12,8 +12,14 @@

 #include "postgres.h"

+#if PG_VERSION_NUM >= 120000
+#include "access/genam.h"
+#endif
 #include "access/htup_details.h"
 #include "access/stratnum.h"
+#if PG_VERSION_NUM >= 120000
+#include "access/table.h"
+#endif
 #include "catalog/pg_constraint.h"
 #include "distributed/foreign_key_relationship.h"
 #include "distributed/hash_helpers.h"

View File

@@ -11,11 +11,10 @@

 #include "catalog/namespace.h"
 #include "distributed/function_utils.h"
+#include "distributed/version_compat.h"
 #include "executor/executor.h"
 #include "utils/builtins.h"
-#if (PG_VERSION_NUM >= 100000)
 #include "utils/regproc.h"
-#endif

 /*
  * FunctionOid searches for a function that has the given name and the given
@@ -83,7 +82,7 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume
 ReturnSetInfo *
 FunctionCallGetTupleStore1(PGFunction function, Oid functionId, Datum argument)
 {
-	FunctionCallInfoData fcinfo;
+	LOCAL_FCINFO(fcinfo, 1);
 	FmgrInfo flinfo;
 	ReturnSetInfo *rsinfo = makeNode(ReturnSetInfo);
 	EState *estate = CreateExecutorState();
@@ -91,12 +90,11 @@ FunctionCallGetTupleStore1(PGFunction function, Oid functionId, Datum argument)
 	rsinfo->allowedModes = SFRM_Materialize;

 	fmgr_info(functionId, &flinfo);
-	InitFunctionCallInfoData(fcinfo, &flinfo, 1, InvalidOid, NULL, (Node *) rsinfo);
-	fcinfo.arg[0] = argument;
-	fcinfo.argnull[0] = false;
+	InitFunctionCallInfoData(*fcinfo, &flinfo, 1, InvalidOid, NULL, (Node *) rsinfo);
+	fcSetArg(fcinfo, 0, argument);

-	(*function)(&fcinfo);
+	(*function)(fcinfo);

 	return rsinfo;
 }
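LOCAL_FCINFO is the fmgr.h macro PostgreSQL 12 introduced for declaring an on-stack FunctionCallInfo with room for a fixed number of arguments. On 10/11 a fallback is needed; a sketch of what such a shim can look like (assumed shape, not quoted from this commit):

#if PG_VERSION_NUM < 120000
/* pre-12: FunctionCallInfoData is a fixed-size struct, so a plain local works */
#define LOCAL_FCINFO(name, nargs) \
	FunctionCallInfoData name##data; \
	FunctionCallInfo name = &name##data
#endif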

View File

@@ -20,6 +20,7 @@
 #include "access/sysattr.h"
 #include "catalog/indexing.h"
 #include "catalog/pg_am.h"
+#include "catalog/pg_enum.h"
 #include "catalog/pg_extension.h"
 #include "catalog/pg_namespace.h"
 #include "catalog/pg_type.h"
@@ -43,6 +44,7 @@
 #include "distributed/pg_dist_placement.h"
 #include "distributed/shared_library_init.h"
 #include "distributed/shardinterval_utils.h"
+#include "distributed/version_compat.h"
 #include "distributed/worker_manager.h"
 #include "distributed/worker_protocol.h"
 #include "executor/executor.h"
@@ -1259,8 +1261,6 @@ static ShardInterval **
 SortShardIntervalArray(ShardInterval **shardIntervalArray, int shardCount,
					   FmgrInfo *shardIntervalSortCompareFunction)
 {
-	ShardInterval **sortedShardIntervalArray = NULL;
-
 	/* short cut if there are no shard intervals in the array */
 	if (shardCount == 0)
 	{
@@ -1272,9 +1272,7 @@ SortShardIntervalArray(ShardInterval **shardIntervalArray, int shardCount,
			  (qsort_arg_comparator) CompareShardIntervals,
			  (void *) shardIntervalSortCompareFunction);

-	sortedShardIntervalArray = shardIntervalArray;
-
-	return sortedShardIntervalArray;
+	return shardIntervalArray;
 }
@@ -1624,9 +1622,8 @@ AvailableExtensionVersion(void)
 {
 	ReturnSetInfo *extensionsResultSet = NULL;
 	TupleTableSlot *tupleTableSlot = NULL;
-	FunctionCallInfoData *fcinfo = NULL;
-	FmgrInfo *flinfo = NULL;
-	int argumentCount = 0;
+	LOCAL_FCINFO(fcinfo, 0);
+	FmgrInfo flinfo;
 	EState *estate = NULL;

 	bool hasTuple = false;
@@ -1641,17 +1638,15 @@ AvailableExtensionVersion(void)
 	extensionsResultSet->econtext = GetPerTupleExprContext(estate);
 	extensionsResultSet->allowedModes = SFRM_Materialize;

-	fcinfo = palloc0(sizeof(FunctionCallInfoData));
-	flinfo = palloc0(sizeof(FmgrInfo));
-
-	fmgr_info(F_PG_AVAILABLE_EXTENSIONS, flinfo);
-	InitFunctionCallInfoData(*fcinfo, flinfo, argumentCount, InvalidOid, NULL,
+	fmgr_info(F_PG_AVAILABLE_EXTENSIONS, &flinfo);
+	InitFunctionCallInfoData(*fcinfo, &flinfo, 0, InvalidOid, NULL,
							 (Node *) extensionsResultSet);

 	/* pg_available_extensions returns result set containing all available extensions */
 	(*pg_available_extensions)(fcinfo);

-	tupleTableSlot = MakeSingleTupleTableSlot(extensionsResultSet->setDesc);
+	tupleTableSlot = MakeSingleTupleTableSlotCompat(extensionsResultSet->setDesc,
+													&TTSOpsMinimalTuple);

 	hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, doCopy,
									   tupleTableSlot);
 	while (hasTuple)
@@ -1992,9 +1987,10 @@ CitusCopyFormatTypeId(void)
 	if (MetadataCache.copyFormatTypeId == InvalidOid)
 	{
 		char *typeName = "citus_copy_format";
-		MetadataCache.copyFormatTypeId = GetSysCacheOid2(TYPENAMENSP,
-														 PointerGetDatum(typeName),
-														 PG_CATALOG_NAMESPACE);
+		MetadataCache.copyFormatTypeId = GetSysCacheOid2Compat(TYPENAMENSP,
+															   Anum_pg_enum_oid,
+															   PointerGetDatum(typeName),
+															   PG_CATALOG_NAMESPACE);
 	}

 	return MetadataCache.copyFormatTypeId;
@@ -2256,7 +2252,11 @@ LookupNodeRoleTypeOid()
 		return InvalidOid;
 	}

+#if PG_VERSION_NUM >= 120000
+	nodeRoleTypId = ((Form_pg_type) GETSTRUCT(tup))->oid;
+#else
 	nodeRoleTypId = HeapTupleGetOid(tup);
+#endif
 	ReleaseSysCache(tup);

 	return nodeRoleTypId;

View File

@@ -25,6 +25,9 @@
 #include "lib/stringinfo.h"
 #include "nodes/pg_list.h"
 #include "pgstat.h"
+#if PG_VERSION_NUM >= 120000
+#include "partitioning/partdesc.h"
+#endif
 #include "utils/builtins.h"
 #include "utils/fmgroids.h"
 #include "utils/lsyscache.h"

View File

@@ -17,7 +17,7 @@

 #include "postgres.h"

-#if (PG_VERSION_NUM >= 110000)
+#if (PG_VERSION_NUM >= 110000) && (PG_VERSION_NUM < 120000)

 #include <ctype.h>
 #include <unistd.h>
@@ -7909,4 +7909,4 @@ get_range_partbound_string(List *bound_datums)
 	return buf->data;
 }

-#endif /* (PG_VERSION_NUM >= 110000) */
+#endif /* (PG_VERSION_NUM >= 110000) && (PG_VERSION_NUM < 120000) */

File diff suppressed because it is too large

View File

@@ -254,7 +254,9 @@ FindShardInterval(Datum partitionColumnValue, DistTableCacheEntry *cacheEntry)
 if (cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH)
 {
-searchedValue = FunctionCall1(cacheEntry->hashFunction, partitionColumnValue);
+searchedValue = FunctionCall1Coll(cacheEntry->hashFunction,
+cacheEntry->partitionColumn->varcollid,
+partitionColumnValue);
 }
 shardIndex = FindShardIntervalIndex(searchedValue, cacheEntry);
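
The collation argument matters on PG 12 because hash functions for collatable types such as text now raise an error when invoked with an invalid collation, which is what plain FunctionCall1() passes. A minimal sketch (names illustrative):

	/* hashFunction is an FmgrInfo *; collationId is e.g. the partition column's varcollid */
	Datum hashedValue = FunctionCall1Coll(hashFunction, collationId, valueDatum);
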
@@ -13,7 +13,6 @@
 #include "citus_version.h"
 #include "fmgr.h"
 #include "utils/uuid.h"
-#include "utils/backend_random.h"
 bool EnableStatisticsCollection = true; /* send basic usage statistics to Citus */
@@ -600,11 +599,11 @@ citus_server_id(PG_FUNCTION_ARGS)
 uint8 *buf = (uint8 *) palloc(UUID_LEN);
 /*
-* If pg_backend_random() fails, fall-back to using random(). In previous
-* versions of postgres we don't have pg_backend_random(), so use it by
+* If pg_strong_random() fails, fall-back to using random(). In previous
+* versions of postgres we don't have pg_strong_random(), so use it by
 * default in that case.
 */
-if (!pg_backend_random((char *) buf, UUID_LEN))
+if (!pg_strong_random((char *) buf, UUID_LEN))
 {
 int bufIdx = 0;
 for (bufIdx = 0; bufIdx < UUID_LEN; bufIdx++)
@@ -17,6 +17,10 @@
 #include "funcapi.h"
 #include "miscadmin.h"
+#if PG_VERSION_NUM >= 120000
+#include "access/genam.h"
+#include "access/table.h"
+#endif
 #include "access/htup_details.h"
 #include "access/xact.h"
 #include "catalog/dependency.h"
@@ -35,7 +39,6 @@
 #include "utils/builtins.h"
 #include "utils/snapmgr.h"
 #include "utils/syscache.h"
-#include "utils/tqual.h"
 /* Local functions forward declarations */
@@ -263,7 +266,11 @@ Datum
 worker_cleanup_job_schema_cache(PG_FUNCTION_ARGS)
 {
 Relation pgNamespace = NULL;
+#if PG_VERSION_NUM >= 120000
+TableScanDesc scanDescriptor = NULL;
+#else
 HeapScanDesc scanDescriptor = NULL;
+#endif
 ScanKey scanKey = NULL;
 int scanKeyCount = 0;
 HeapTuple heapTuple = NULL;
@@ -271,7 +278,11 @@ worker_cleanup_job_schema_cache(PG_FUNCTION_ARGS)
 CheckCitusVersion(ERROR);
 pgNamespace = heap_open(NamespaceRelationId, AccessExclusiveLock);
+#if PG_VERSION_NUM >= 120000
+scanDescriptor = table_beginscan_catalog(pgNamespace, scanKeyCount, scanKey);
+#else
 scanDescriptor = heap_beginscan_catalog(pgNamespace, scanKeyCount, scanKey);
+#endif
 heapTuple = heap_getnext(scanDescriptor, ForwardScanDirection);
 while (HeapTupleIsValid(heapTuple))
@@ -362,7 +373,8 @@ RemoveJobSchema(StringInfo schemaName)
 Datum schemaNameDatum = CStringGetDatum(schemaName->data);
 Oid schemaId = InvalidOid;
-schemaId = GetSysCacheOid(NAMESPACENAME, schemaNameDatum, 0, 0, 0);
+schemaId = GetSysCacheOid1Compat(NAMESPACENAME, Anum_pg_namespace_oid,
+schemaNameDatum);
 if (OidIsValid(schemaId))
 {
 ObjectAddress schemaObject = { 0, 0, 0 };
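
A short sketch of what the Compat macro buys (the lookup shown is illustrative): on PG 12 the extra argument names the catalog column that carries the oid; on older versions the macro drops it.

	Oid namespaceId = GetSysCacheOid1Compat(NAMESPACENAME,
	                                        Anum_pg_namespace_oid,
	                                        CStringGetDatum("public"));
	/* PG 12+:  GetSysCacheOid1(NAMESPACENAME, Anum_pg_namespace_oid, key)
	 * PG < 12: GetSysCacheOid1(NAMESPACENAME, key) */
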
@@ -65,8 +65,8 @@ static uint32 FileBufferSize(int partitionBufferSizeInKB, uint32 fileCount);
 static FileOutputStream * OpenPartitionFiles(StringInfo directoryName, uint32 fileCount);
 static void ClosePartitionFiles(FileOutputStream *partitionFileArray, uint32 fileCount);
 static void RenameDirectory(StringInfo oldDirectoryName, StringInfo newDirectoryName);
-static void FileOutputStreamWrite(FileOutputStream file, StringInfo dataToWrite);
-static void FileOutputStreamFlush(FileOutputStream file);
+static void FileOutputStreamWrite(FileOutputStream *file, StringInfo dataToWrite);
+static void FileOutputStreamFlush(FileOutputStream *file);
 static void FilterAndPartitionTable(const char *filterQuery,
 const char *columnName, Oid columnType,
 uint32 (*PartitionIdFunction)(Datum, const void *),
@@ -221,6 +221,7 @@ worker_hash_partition_table(PG_FUNCTION_ARGS)
 partitionContext->hashFunction = hashFunction;
 partitionContext->partitionCount = partitionCount;
+partitionContext->collation = PG_GET_COLLATION();
 /* we'll use binary search, we need the comparison function */
 if (!partitionContext->hasUniformHashDistribution)
@@ -464,7 +465,7 @@ OpenPartitionFiles(StringInfo directoryName, uint32 fileCount)
 FileOutputStream *partitionFileArray = NULL;
 File fileDescriptor = 0;
 uint32 fileIndex = 0;
-const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | PG_BINARY);
+const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
 const int fileMode = (S_IRUSR | S_IWUSR);
 partitionFileArray = palloc0(fileCount * sizeof(FileOutputStream));
@@ -480,7 +481,8 @@ OpenPartitionFiles(StringInfo directoryName, uint32 fileCount)
 errmsg("could not open file \"%s\": %m", filePath->data)));
 }
-partitionFileArray[fileIndex].fileDescriptor = fileDescriptor;
+partitionFileArray[fileIndex].fileCompat = FileCompatFromFileStart(
+fileDescriptor);
 partitionFileArray[fileIndex].fileBuffer = makeStringInfo();
 partitionFileArray[fileIndex].filePath = filePath;
 }
@@ -500,13 +502,13 @@ ClosePartitionFiles(FileOutputStream *partitionFileArray, uint32 fileCount)
 uint32 fileIndex = 0;
 for (fileIndex = 0; fileIndex < fileCount; fileIndex++)
 {
-FileOutputStream partitionFile = partitionFileArray[fileIndex];
+FileOutputStream *partitionFile = &partitionFileArray[fileIndex];
 FileOutputStreamFlush(partitionFile);
-FileClose(partitionFile.fileDescriptor);
-FreeStringInfo(partitionFile.fileBuffer);
-FreeStringInfo(partitionFile.filePath);
+FileClose(partitionFile->fileCompat.fd);
+FreeStringInfo(partitionFile->fileBuffer);
+FreeStringInfo(partitionFile->filePath);
 }
 pfree(partitionFileArray);
@@ -829,9 +831,9 @@ RenameDirectory(StringInfo oldDirectoryName, StringInfo newDirectoryName)
 * if so, the function flushes the buffer to the underlying file.
 */
 static void
-FileOutputStreamWrite(FileOutputStream file, StringInfo dataToWrite)
+FileOutputStreamWrite(FileOutputStream *file, StringInfo dataToWrite)
 {
-StringInfo fileBuffer = file.fileBuffer;
+StringInfo fileBuffer = file->fileBuffer;
 uint32 newBufferSize = fileBuffer->len + dataToWrite->len;
 appendBinaryStringInfo(fileBuffer, dataToWrite->data, dataToWrite->len);
@@ -847,19 +849,19 @@ FileOutputStreamWrite(FileOutputStream file, StringInfo dataToWrite)
 /* Flushes data buffered in the file stream object to the underlying file. */
 static void
-FileOutputStreamFlush(FileOutputStream file)
+FileOutputStreamFlush(FileOutputStream *file)
 {
-StringInfo fileBuffer = file.fileBuffer;
+StringInfo fileBuffer = file->fileBuffer;
 int written = 0;
 errno = 0;
-written = FileWrite(file.fileDescriptor, fileBuffer->data, fileBuffer->len,
+written = FileWriteCompat(&file->fileCompat, fileBuffer->data, fileBuffer->len,
 PG_WAIT_IO);
 if (written != fileBuffer->len)
 {
 ereport(ERROR, (errcode_for_file_access(),
 errmsg("could not write %d bytes to partition file \"%s\"",
-fileBuffer->len, file.filePath->data)));
+fileBuffer->len, file->filePath->data)));
 }
 }
@@ -952,7 +954,7 @@ FilterAndPartitionTable(const char *filterQuery,
 {
 HeapTuple row = SPI_tuptable->vals[rowIndex];
 TupleDesc rowDescriptor = SPI_tuptable->tupdesc;
-FileOutputStream partitionFile = { 0, 0, 0 };
+FileOutputStream *partitionFile = NULL;
 StringInfo rowText = NULL;
 Datum partitionKey = 0;
 bool partitionKeyNull = false;
@@ -988,7 +990,7 @@ FilterAndPartitionTable(const char *filterQuery,
 rowText = rowOutputState->fe_msgbuf;
-partitionFile = partitionFileArray[partitionId];
+partitionFile = &partitionFileArray[partitionId];
 FileOutputStreamWrite(partitionFile, rowText);
 resetStringInfo(rowText);
@@ -1136,7 +1138,7 @@ OutputBinaryHeaders(FileOutputStream *partitionFileArray, uint32 fileCount)
 for (fileIndex = 0; fileIndex < fileCount; fileIndex++)
 {
 /* Generate header for a binary copy */
-FileOutputStream partitionFile = { 0, 0, 0 };
+FileOutputStream partitionFile = { };
 CopyOutStateData headerOutputStateData;
 CopyOutState headerOutputState = (CopyOutState) & headerOutputStateData;
@@ -1146,7 +1148,7 @@ OutputBinaryHeaders(FileOutputStream *partitionFileArray, uint32 fileCount)
 AppendCopyBinaryHeaders(headerOutputState);
 partitionFile = partitionFileArray[fileIndex];
-FileOutputStreamWrite(partitionFile, headerOutputState->fe_msgbuf);
+FileOutputStreamWrite(&partitionFile, headerOutputState->fe_msgbuf);
 }
 }
@@ -1162,7 +1164,7 @@ OutputBinaryFooters(FileOutputStream *partitionFileArray, uint32 fileCount)
 for (fileIndex = 0; fileIndex < fileCount; fileIndex++)
 {
 /* Generate footer for a binary copy */
-FileOutputStream partitionFile = { 0, 0, 0 };
+FileOutputStream partitionFile = { };
 CopyOutStateData footerOutputStateData;
 CopyOutState footerOutputState = (CopyOutState) & footerOutputStateData;
@@ -1172,7 +1174,7 @@ OutputBinaryFooters(FileOutputStream *partitionFileArray, uint32 fileCount)
 AppendCopyBinaryFooters(footerOutputState);
 partitionFile = partitionFileArray[fileIndex];
-FileOutputStreamWrite(partitionFile, footerOutputState->fe_msgbuf);
+FileOutputStreamWrite(&partitionFile, footerOutputState->fe_msgbuf);
 }
 }
@@ -1263,7 +1265,8 @@ HashPartitionId(Datum partitionValue, const void *context)
 ShardInterval **syntheticShardIntervalArray =
 hashPartitionContext->syntheticShardIntervalArray;
 FmgrInfo *comparisonFunction = hashPartitionContext->comparisonFunction;
-Datum hashDatum = FunctionCall1(hashFunction, partitionValue);
+Datum hashDatum = FunctionCall1Coll(hashFunction, hashPartitionContext->collation,
+partitionValue);
 int32 hashResult = 0;
 uint32 hashPartitionId = 0;
@@ -16,6 +16,7 @@
 #include "distributed/commands/multi_copy.h"
 #include "distributed/multi_executor.h"
 #include "distributed/transmit.h"
+#include "distributed/version_compat.h"
 #include "distributed/worker_protocol.h"
 #include "utils/builtins.h"
 #include "utils/memutils.h"
@@ -38,7 +39,7 @@ typedef struct TaskFileDestReceiver
 /* output file */
 char *filePath;
-File fileDesc;
+FileCompat fileCompat;
 bool binaryCopyFormat;
 /* state on how to copy out data types */
@@ -55,7 +56,7 @@ static DestReceiver * CreateTaskFileDestReceiver(char *filePath, EState *executo
 static void TaskFileDestReceiverStartup(DestReceiver *dest, int operation,
 TupleDesc inputTupleDescriptor);
 static bool TaskFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest);
-static void WriteToLocalFile(StringInfo copyData, File fileDesc);
+static void WriteToLocalFile(StringInfo copyData, TaskFileDestReceiver *taskFileDest);
 static void TaskFileDestReceiverShutdown(DestReceiver *destReceiver);
 static void TaskFileDestReceiverDestroy(DestReceiver *destReceiver);
@@ -183,8 +184,10 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation,
 taskFileDest->columnOutputFunctions = ColumnOutputFunctions(inputTupleDescriptor,
 copyOutState->binary);
-taskFileDest->fileDesc = FileOpenForTransmit(taskFileDest->filePath, fileFlags,
-fileMode);
+taskFileDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(
+taskFileDest->filePath,
+fileFlags,
+fileMode));
 if (copyOutState->binary)
 {
@@ -192,7 +195,7 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation,
 resetStringInfo(copyOutState->fe_msgbuf);
 AppendCopyBinaryHeaders(copyOutState);
-WriteToLocalFile(copyOutState->fe_msgbuf, taskFileDest->fileDesc);
+WriteToLocalFile(copyOutState->fe_msgbuf, taskFileDest);
 }
 MemoryContextSwitchTo(oldContext);
@@ -233,7 +236,7 @@ TaskFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)
 AppendCopyRowData(columnValues, columnNulls, tupleDescriptor,
 copyOutState, columnOutputFunctions, NULL);
-WriteToLocalFile(copyOutState->fe_msgbuf, taskFileDest->fileDesc);
+WriteToLocalFile(copyOutState->fe_msgbuf, taskFileDest);
 MemoryContextSwitchTo(oldContext);
@@ -249,9 +252,10 @@ TaskFileDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)
 * WriteToLocalResultsFile writes the bytes in a StringInfo to a local file.
 */
 static void
-WriteToLocalFile(StringInfo copyData, File fileDesc)
+WriteToLocalFile(StringInfo copyData, TaskFileDestReceiver *taskFileDest)
 {
-int bytesWritten = FileWrite(fileDesc, copyData->data, copyData->len, PG_WAIT_IO);
+int bytesWritten = FileWriteCompat(&taskFileDest->fileCompat, copyData->data,
+copyData->len, PG_WAIT_IO);
 if (bytesWritten < 0)
 {
 ereport(ERROR, (errcode_for_file_access(),
@@ -276,10 +280,10 @@ TaskFileDestReceiverShutdown(DestReceiver *destReceiver)
 /* write footers when using binary encoding */
 resetStringInfo(copyOutState->fe_msgbuf);
 AppendCopyBinaryFooters(copyOutState);
-WriteToLocalFile(copyOutState->fe_msgbuf, taskFileDest->fileDesc);
+WriteToLocalFile(copyOutState->fe_msgbuf, taskFileDest);
 }
-FileClose(taskFileDest->fileDesc);
+FileClose(taskFileDest->fileCompat.fd);
 }
@@ -12,11 +12,8 @@
 #define CITUS_RULEUTILS_H
 #include "postgres.h" /* IWYU pragma: keep */
-#include "c.h"
-#if (PG_VERSION_NUM >= 100000)
 #include "catalog/pg_sequence.h"
-#endif
 #include "commands/sequence.h"
 #include "lib/stringinfo.h"
 #include "nodes/parsenodes.h"
@@ -93,11 +93,8 @@ extern void ErrorIfUnsupportedSeqStmt(CreateSeqStmt *createSeqStmt);
 extern void ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt);
-#if (PG_VERSION_NUM >= 100000)
 /* subscription.c - forward declarations */
 extern Node * ProcessCreateSubscriptionStmt(CreateSubscriptionStmt *createSubStmt);
-#endif /* PG_VERSION_NUM >= 100000 */
 /* table.c - forward declarations */
@@ -11,7 +11,12 @@
 #define DISTRIBUTED_PLANNER_H
 #include "nodes/plannodes.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#else
 #include "nodes/relation.h"
+#endif
 #include "distributed/citus_nodes.h"
 #include "distributed/errormessage.h"
@@ -11,6 +11,9 @@
 #include "utils/hsearch.h"
+/* pg12 includes this exact implementation of hash_combine */
+#if PG_VERSION_NUM < 120000
 /*
 * Combine two hash values, resulting in another hash value, with decent bit
 * mixing.
@@ -25,6 +28,9 @@ hash_combine(uint32 a, uint32 b)
 }
+#endif
 extern void hash_delete_all(HTAB *htab);
 #endif
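
The hash_combine body is elided by the hunk above; for reference, the boost-style mixer that PG 12 ships in utils/hashutils.h (and that this header keeps providing for older versions) is:

	static inline uint32
	hash_combine(uint32 a, uint32 b)
	{
		a ^= b + 0x9e3779b9 + (a << 6) + (a >> 2);
		return a;
	}
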
@@ -32,27 +32,6 @@
 #define PG_TOTAL_RELATION_SIZE_FUNCTION "pg_total_relation_size(%s)"
 #define CSTORE_TABLE_SIZE_FUNCTION "cstore_table_size(%s)"
-#if (PG_VERSION_NUM < 100000)
-static inline void
-CatalogTupleUpdate(Relation heapRel, ItemPointer otid, HeapTuple tup)
-{
-simple_heap_update(heapRel, otid, tup);
-CatalogUpdateIndexes(heapRel, tup);
-}
-static inline Oid
-CatalogTupleInsert(Relation heapRel, HeapTuple tup)
-{
-Oid oid = simple_heap_insert(heapRel, tup);
-CatalogUpdateIndexes(heapRel, tup);
-return oid;
-}
-#endif
 /* In-memory representation of a typed tuple in pg_dist_shard. */
 typedef struct ShardInterval
 {
@@ -168,6 +147,8 @@ extern char * ConstructQualifiedShardName(ShardInterval *shardInterval);
 extern uint64 GetFirstShardId(Oid relationId);
 extern Datum StringToDatum(char *inputString, Oid dataType);
 extern char * DatumToString(Datum datum, Oid dataType);
+extern int CompareShardPlacementsByWorker(const void *leftElement,
+const void *rightElement);
 #endif /* MASTER_METADATA_UTILITY_H */
@@ -51,13 +51,12 @@ typedef enum CitusRTEKind
 CITUS_RTE_SUBQUERY = RTE_SUBQUERY, /* subquery in FROM */
 CITUS_RTE_JOIN = RTE_JOIN, /* join */
 CITUS_RTE_FUNCTION = RTE_FUNCTION, /* function in FROM */
-#if (PG_VERSION_NUM >= 100000)
 CITUS_RTE_TABLEFUNC = RTE_TABLEFUNC, /* TableFunc(.., column list) */
-#endif
 CITUS_RTE_VALUES = RTE_VALUES, /* VALUES (<exprlist>), (<exprlist>), ... */
 CITUS_RTE_CTE = RTE_CTE, /* common table expr (WITH list element) */
-#if (PG_VERSION_NUM >= 100000)
 CITUS_RTE_NAMEDTUPLESTORE = RTE_NAMEDTUPLESTORE, /* tuplestore, e.g. for triggers */
+#if (PG_VERSION_NUM >= 120000)
+CITUS_RTE_RESULT = RTE_RESULT, /* RTE represents an empty FROM clause */
 #endif
 CITUS_RTE_SHARD,
 CITUS_RTE_REMOTE_QUERY
@@ -24,6 +24,7 @@ extern bool SubqueryPushdown;
 extern bool ShouldUseSubqueryPushDown(Query *originalQuery, Query *rewrittenQuery);
 extern bool JoinTreeContainsSubquery(Query *query);
+extern bool HasEmptyJoinTree(Query *query);
 extern bool WhereClauseContainsSubquery(Query *query);
 extern bool SafeToPushdownWindowFunction(Query *query, StringInfo *errorDetail);
 extern MultiNode * SubqueryMultiNodeTree(Query *originalQuery,
@@ -15,7 +15,11 @@
 #include "distributed/relation_restriction_equivalence.h"
 #include "nodes/pg_list.h"
 #include "nodes/primnodes.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#else
 #include "nodes/relation.h"
+#endif
 extern List * GenerateSubplansForSubqueriesAndCTEs(uint64 planId, Query *originalQuery,
@@ -103,9 +103,7 @@ typedef struct WorkerTasksSharedStateData
 {
 /* Lock protecting workerNodesHash */
 int taskHashTrancheId;
-#if (PG_VERSION_NUM >= 100000)
 char *taskHashTrancheName;
-#endif
 LWLock taskHashLock;
 bool conninfosValid;
 } WorkerTasksSharedStateData;
@@ -16,7 +16,11 @@
 #include "catalog/namespace.h"
 #include "nodes/parsenodes.h"
-#if (PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000)
+#if (PG_VERSION_NUM >= 120000)
+#include "optimizer/optimizer.h"
+#endif
+#if (PG_VERSION_NUM < 110000)
 #include "access/hash.h"
 #include "storage/fd.h"
@@ -240,5 +244,125 @@ RangeVarGetRelidInternal(const RangeVar *relation, LOCKMODE lockmode, uint32 fla
 #endif
+#if PG_VERSION_NUM >= 120000
+#define MakeSingleTupleTableSlotCompat MakeSingleTupleTableSlot
+#define AllocSetContextCreateExtended AllocSetContextCreateInternal
+#define NextCopyFromCompat NextCopyFrom
+#define ArrayRef SubscriptingRef
+#define T_ArrayRef T_SubscriptingRef
+#define or_clause is_orclause
+#define GetSysCacheOid1Compat GetSysCacheOid1
+#define GetSysCacheOid2Compat GetSysCacheOid2
+#define GetSysCacheOid3Compat GetSysCacheOid3
+#define GetSysCacheOid4Compat GetSysCacheOid4
+#define fcSetArg(fc, n, argval) \
+(((fc)->args[n].isnull = false), ((fc)->args[n].value = (argval)))
+#define fcSetArgNull(fc, n) \
+(((fc)->args[n].isnull = true), ((fc)->args[n].value = (Datum) 0))
+typedef struct
+{
+File fd;
+off_t offset;
+} FileCompat;
+static inline int
+FileWriteCompat(FileCompat *file, char *buffer, int amount, uint32 wait_event_info)
+{
+int count = FileWrite(file->fd, buffer, amount, file->offset, wait_event_info);
+if (count > 0)
+{
+file->offset += count;
+}
+return count;
+}
+static inline int
+FileReadCompat(FileCompat *file, char *buffer, int amount, uint32 wait_event_info)
+{
+int count = FileRead(file->fd, buffer, amount, file->offset, wait_event_info);
+if (count > 0)
+{
+file->offset += count;
+}
+return count;
+}
+static inline FileCompat
+FileCompatFromFileStart(File fileDesc)
+{
+FileCompat fc = {
+.fd = fileDesc,
+.offset = 0
+};
+return fc;
+}
+#else /* pre PG12 */
+#define QTW_EXAMINE_RTES_BEFORE QTW_EXAMINE_RTES
+#define MakeSingleTupleTableSlotCompat(tupleDesc, tts_opts) \
+MakeSingleTupleTableSlot(tupleDesc)
+#define NextCopyFromCompat(cstate, econtext, values, nulls) \
+NextCopyFrom(cstate, econtext, values, nulls, NULL)
+/*
+* In PG12 GetSysCacheOid requires an oid column,
+* whereas beforehand the oid column was implicit with WITH OIDS
+*/
+#define GetSysCacheOid1Compat(cacheId, oidcol, key1) \
+GetSysCacheOid1(cacheId, key1)
+#define GetSysCacheOid2Compat(cacheId, oidcol, key1, key2) \
+GetSysCacheOid2(cacheId, key1, key2)
+#define GetSysCacheOid3Compat(cacheId, oidcol, key1, key2, key3) \
+GetSysCacheOid3(cacheId, key1, key2, key3)
+#define GetSysCacheOid4Compat(cacheId, oidcol, key1, key2, key3, key4) \
+GetSysCacheOid4(cacheId, key1, key2, key3, key4)
+#define LOCAL_FCINFO(name, nargs) \
+FunctionCallInfoData name ## data; \
+FunctionCallInfoData *name = &name ## data
+#define fcSetArg(fc, n, value) \
+(((fc)->argnull[n] = false), ((fc)->arg[n] = (value)))
+#define fcSetArgNull(fc, n) \
+(((fc)->argnull[n] = true), ((fc)->arg[n] = (Datum) 0))
+typedef struct
+{
+File fd;
+} FileCompat;
+static inline int
+FileWriteCompat(FileCompat *file, char *buffer, int amount, uint32 wait_event_info)
+{
+return FileWrite(file->fd, buffer, amount, wait_event_info);
+}
+static inline int
+FileReadCompat(FileCompat *file, char *buffer, int amount, uint32 wait_event_info)
+{
+return FileRead(file->fd, buffer, amount, wait_event_info);
+}
+static inline FileCompat
+FileCompatFromFileStart(File fileDesc)
+{
+FileCompat fc = {
+.fd = fileDesc,
+};
+return fc;
+}
+#endif /* PG12 */
 #endif /* VERSION_COMPAT_H */
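
A minimal usage sketch of the FileCompat shim defined above: PG 12's FileRead()/FileWrite() take an explicit offset instead of relying on a per-File seek position, so the shim tracks the offset itself and stays source-compatible on older versions. WriteAllCompat is an illustrative helper, not part of the commit:

	static void
	WriteAllCompat(File fileDesc, char *data, int len)
	{
		FileCompat file = FileCompatFromFileStart(fileDesc);

		if (FileWriteCompat(&file, data, len, PG_WAIT_IO) != len)
		{
			ereport(ERROR, (errcode_for_file_access(),
							errmsg("could not write %d bytes", len)));
		}
	}
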
@@ -20,6 +20,7 @@
 #include "nodes/parsenodes.h"
 #include "storage/fd.h"
 #include "utils/array.h"
+#include "distributed/version_compat.h"
 /* Number of rows to prefetch when reading data with a cursor */
@@ -79,6 +80,7 @@ typedef struct HashPartitionContext
 FmgrInfo *comparisonFunction;
 ShardInterval **syntheticShardIntervalArray;
 uint32 partitionCount;
+Oid collation;
 bool hasUniformHashDistribution;
 } HashPartitionContext;
@@ -91,7 +93,7 @@ typedef struct HashPartitionContext
 */
 typedef struct FileOutputStream
 {
-File fileDescriptor;
+FileCompat fileCompat;
 StringInfo fileBuffer;
 StringInfo filePath;
 } FileOutputStream;
@@ -12,6 +12,10 @@ s/ port=[0-9]+ / port=xxxxx /g
 s/placement [0-9]+/placement xxxxx/g
 s/shard [0-9]+/shard xxxxx/g
 s/assigned task [0-9]+ to node/assigned task to node/
+s/node group [12] (but|does)/node group \1/
+# Differing names can have differing table column widths
+s/(-+\|)+-+/---/g
 # In foreign_key_to_reference_table, normalize shard table names, etc in
 # the generated plan
@@ -69,3 +73,16 @@ s/(job_[0-9]+\/task_[0-9]+\/p_[0-9]+\.)[0-9]+/\1xxxx/g
 # isolation_ref2ref_foreign_keys
 s/"(ref_table_[0-9]_|ref_table_[0-9]_value_fkey_)[0-9]+"/"\1xxxxxxx"/g
+# Line info varies between versions
+/^LINE [0-9]+:.*$/d
+/^ *\^$/d
+# pg12 changes
+s/Partitioned table "/Table "/g
+s/\) TABLESPACE pg_default$/\)/g
+s/invalid input syntax for type /invalid input syntax for /g
+s/_id_ref_id_fkey/_id_fkey/g
+s/_ref_id_id_fkey_/_ref_id_fkey_/g
+s/fk_test_2_col1_col2_fkey/fk_test_2_col1_fkey/g
+s/_id_other_column_ref_fkey/_id_fkey/g
@@ -1,41 +1,44 @@
 # List of tests whose output we want to normalize, one per line
-multi_alter_table_add_constraints
-multi_alter_table_statements
-foreign_key_to_reference_table
+custom_aggregate_support
 failure_copy_on_hash
-failure_savepoints
-foreign_key_restriction_enforcement
 failure_real_time_select
+failure_savepoints
 failure_vacuum
+foreign_key_restriction_enforcement
+foreign_key_to_reference_table
+intermediate_results
 isolation_citus_dist_activity
 isolation_ref2ref_foreign_keys
+multi_alter_table_add_constraints
+multi_alter_table_statements
+multi_copy
+multi_create_table_constraints
+multi_explain
+multi_foreign_key
+multi_generate_ddl_commands
+multi_having_pushdown
 multi_insert_select
 multi_insert_select_conflict
-multi_multiuser
-multi_name_lengths
-multi_partition_pruning
-multi_subtransactions
-multi_modifying_xacts
-multi_insert_select
-sql_procedure
-multi_reference_table
-multi_create_table_constraints
-# the following tests' output are
-# normalized for EXPLAIN outputs
-# where the executor name is wiped out
 multi_join_order_tpch_small
 multi_join_pruning
+multi_master_protocol
+multi_metadata_sync
+multi_modifying_xacts
+multi_multiuser
+multi_mx_explain
+multi_name_lengths
+multi_null_minmax_value_pruning
 multi_orderby_limit_pushdown
+multi_partitioning
+multi_partitioning_utils
 multi_partition_pruning
+multi_reference_table
 multi_select_distinct
 multi_subquery_window_functions
+multi_subtransactions
 multi_task_assignment_policy
 multi_view
-multi_explain
-multi_null_minmax_value_pruning
+sql_procedure
 window_functions
-multi_having_pushdown
-multi_partitioning
-multi_mx_explain
-custom_aggregate_support
+worker_check_invalid_arguments
@@ -52,8 +52,8 @@ SELECT citus.dump_network_traffic();
 (0,worker,"[""RowDescription(fieldcount=2,fields=['F(name=min,tableoid=0,colattrnum=0,typoid=23,typlen=4,typmod=-1,format_code=0)', 'F(name=max,tableoid=0,colattrnum=0,typoid=23,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=2,columns=[""C(length=0,value=b\\'\\')"", ""C(length=1,value=b\\'0\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=in_transaction_block)']")
 (0,coordinator,"['Query(query=COMMIT)']")
 (0,worker,"['CommandComplete(command=COMMIT)', 'ReadyForQuery(state=idle)']")
-(0,coordinator,"['Query(query=COPY (SELECT count(1) AS count FROM copy_test_100400 copy_test WHERE true) TO STDOUT)']")
-(0,worker,"[""CopyOutResponse(format=0,columncount=1,columns=['Anonymous(format=0)'])"", ""CopyData(data=b'4\\\\n')"", 'CopyDone()', 'CommandComplete(command=COPY 1)', 'ReadyForQuery(state=idle)']")
+(0,coordinator,"['Query(query=SELECT count(1) AS count FROM copy_test_100400 copy_test WHERE true)']")
+(0,worker,"[""RowDescription(fieldcount=1,fields=['F(name=count,tableoid=0,colattrnum=0,typoid=20,typlen=8,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
 (20 rows)
 ---- all of the following tests test behavior with 2 shard placements ----
@@ -168,7 +168,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()');
 (1 row)
 SELECT count(1) FROM copy_test;
-WARNING: could not consume data from worker node
+WARNING: connection error: localhost:9060
+DETAIL: server closed the connection unexpectedly
+This probably means the server terminated abnormally
+before or while processing the request.
 count
 -------
 4
@@ -224,26 +227,6 @@ SELECT count(1) FROM copy_test;
 4
 (1 row)
--- we round-robin when picking which node to run pg_table_size on, this COPY runs it on
--- the other node, so the next copy will try to run it on our node
-COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
-SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
-WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
-ORDER BY p.nodeport, p.placementid;
-logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+-------------
-copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 9060 | 110
-copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 57637 | 111
-(4 rows)
-SELECT count(1) FROM copy_test;
-count
--------
-8
-(1 row)
 ---- kill the connection when we try to get the min, max of the table ----
 SELECT citus.mitmproxy('conn.onQuery(query="SELECT min\(key\), max\(key\)").kill()');
 mitmproxy
@@ -266,14 +249,12 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 --------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+-------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 9060 | 110
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 57637 | 111
-(4 rows)
+(2 rows)
 SELECT count(1) FROM copy_test;
 count
 -------
-8
+4
 (1 row)
 ---- kill the connection when we try to COMMIT ----
@@ -296,16 +277,14 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 --------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+-------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 9060 | 110
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 57637 | 111
-copy_test | 100409 | t | 0 | 3 | 100409 | 3 | 8192 | localhost | 9060 | 114
-copy_test | 100409 | t | 0 | 3 | 100409 | 1 | 8192 | localhost | 57637 | 115
-(6 rows)
+copy_test | 100408 | t | 0 | 3 | 100408 | 1 | 8192 | localhost | 57637 | 112
+copy_test | 100408 | t | 0 | 3 | 100408 | 3 | 8192 | localhost | 9060 | 113
+(4 rows)
 SELECT count(1) FROM copy_test;
 count
 -------
-12
+8
 (1 row)
 -- ==== Clean up, we're done here ====
@@ -52,8 +52,8 @@ SELECT citus.dump_network_traffic();
 (0,worker,"[""RowDescription(fieldcount=2,fields=['F(name=min,tableoid=0,colattrnum=0,typoid=23,typlen=4,typmod=-1,format_code=0)', 'F(name=max,tableoid=0,colattrnum=0,typoid=23,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=2,columns=[""C(length=0,value=b\\'\\')"", ""C(length=1,value=b\\'0\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=in_transaction_block)']")
 (0,coordinator,"['Query(query=COMMIT)']")
 (0,worker,"['CommandComplete(command=COMMIT)', 'ReadyForQuery(state=idle)']")
-(0,coordinator,"['Query(query=SELECT count(1) AS count FROM copy_test_100400 copy_test WHERE true)']")
-(0,worker,"[""RowDescription(fieldcount=1,fields=['F(name=count,tableoid=0,colattrnum=0,typoid=20,typlen=8,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
+(0,coordinator,"['Query(query=COPY (SELECT count(1) AS count FROM copy_test_100400 copy_test WHERE true) TO STDOUT)']")
+(0,worker,"[""CopyOutResponse(format=0,columncount=1,columns=['Anonymous(format=0)'])"", ""CopyData(data=b'4\\\\n')"", 'CopyDone()', 'CommandComplete(command=COPY 1)', 'ReadyForQuery(state=idle)']")
 (20 rows)
 ---- all of the following tests test behavior with 2 shard placements ----
@@ -168,10 +168,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()');
 (1 row)
 SELECT count(1) FROM copy_test;
-WARNING: connection error: localhost:9060
-DETAIL: server closed the connection unexpectedly
-This probably means the server terminated abnormally
-before or while processing the request.
+WARNING: could not consume data from worker node
 count
 -------
 4
@@ -227,26 +224,6 @@ SELECT count(1) FROM copy_test;
 4
 (1 row)
--- we round-robin when picking which node to run pg_table_size on, this COPY runs it on
--- the other node, so the next copy will try to run it on our node
-COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
-SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
-WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
-ORDER BY p.nodeport, p.placementid;
-logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+-------------
-copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 9060 | 110
-copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 57637 | 111
-(4 rows)
-SELECT count(1) FROM copy_test;
-count
--------
-8
-(1 row)
 ---- kill the connection when we try to get the min, max of the table ----
 SELECT citus.mitmproxy('conn.onQuery(query="SELECT min\(key\), max\(key\)").kill()');
 mitmproxy
@@ -269,14 +246,12 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 --------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+-------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 9060 | 110
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 57637 | 111
-(4 rows)
+(2 rows)
 SELECT count(1) FROM copy_test;
 count
 -------
-8
+4
 (1 row)
 ---- kill the connection when we try to COMMIT ----
@@ -299,16 +274,14 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
 --------------+---------+--------------+---------------+---------------+---------+------------+-------------+-----------+----------+-------------
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
 copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 9060 | 110
-copy_test | 100407 | t | 0 | 3 | 100407 | 1 | 8192 | localhost | 57637 | 111
-copy_test | 100409 | t | 0 | 3 | 100409 | 3 | 8192 | localhost | 9060 | 114
-copy_test | 100409 | t | 0 | 3 | 100409 | 1 | 8192 | localhost | 57637 | 115
-(6 rows)
+copy_test | 100408 | t | 0 | 3 | 100408 | 1 | 8192 | localhost | 57637 | 112
+copy_test | 100408 | t | 0 | 3 | 100408 | 3 | 8192 | localhost | 9060 | 113
+(4 rows)
 SELECT count(1) FROM copy_test;
 count
 -------
-12
+8
 (1 row)
 -- ==== Clean up, we're done here ====
@@ -45,7 +45,11 @@ SELECT citus.dump_network_traffic();
 (0,worker,"['CommandComplete(command=COPY 4)', 'ReadyForQuery(state=in_transaction_block)']")
 (0,coordinator,"['Query(query=COMMIT)']")
 (0,worker,"['CommandComplete(command=COMMIT)', 'ReadyForQuery(state=idle)']")
-(10 rows)
+(1,coordinator,"[initial message]")
+(1,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
+(1,coordinator,"['Query(query=SELECT count(1) AS count FROM public.copy_test_XXXXXX copy_test)']")
+(1,worker,"[""RowDescription(fieldcount=1,fields=['F(name=count,tableoid=0,colattrnum=0,typoid=20,typlen=8,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
+(14 rows)
 -- ==== kill the connection when we try to start a transaction ====
 -- the query should abort
@@ -305,7 +305,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
 -- hide the error message (it has the PID)...
 -- we'll test for the txn side-effects to ensure it didn't run
-SET client_min_messages TO FATAL;
+SET client_min_messages TO ERROR;
 BEGIN;
 DELETE FROM dml_test WHERE id = 1;
 DELETE FROM dml_test WHERE id = 2;
@@ -305,7 +305,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
 -- hide the error message (it has the PID)...
 -- we'll test for the txn side-effects to ensure it didn't run
-SET client_min_messages TO FATAL;
+SET client_min_messages TO ERROR;
 BEGIN;
 DELETE FROM dml_test WHERE id = 1;
 DELETE FROM dml_test WHERE id = 2;
@@ -20,31 +20,31 @@ SELECT create_distributed_table('select_test', 'key');
 (1 row)
 -- put data in shard for which mitm node is first placement
-INSERT INTO select_test VALUES (2, 'test data');
+INSERT INTO select_test VALUES (3, 'test data');
 SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
 mitmproxy
 -----------
 (1 row)
-SELECT * FROM select_test WHERE key = 2;
-WARNING: server closed the connection unexpectedly
+SELECT * FROM select_test WHERE key = 3;
+WARNING: connection error: localhost:9060
+DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
-CONTEXT: while executing command on localhost:9060
 key | value
 -----+-----------
-2 | test data
+3 | test data
 (1 row)
-SELECT * FROM select_test WHERE key = 2;
-WARNING: server closed the connection unexpectedly
+SELECT * FROM select_test WHERE key = 3;
+WARNING: connection error: localhost:9060
+DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
-CONTEXT: while executing command on localhost:9060
 key | value
 -----+-----------
-2 | test data
+3 | test data
 (1 row)
 -- kill after first SELECT; txn should work (though placement marked bad)
@@ -55,34 +55,32 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
 (1 row)
 BEGIN;
-INSERT INTO select_test VALUES (2, 'more data');
-SELECT * FROM select_test WHERE key = 2;
-WARNING: server closed the connection unexpectedly
+INSERT INTO select_test VALUES (3, 'more data');
+SELECT * FROM select_test WHERE key = 3;
+WARNING: connection error: localhost:9060
+DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
-CONTEXT: while executing command on localhost:9060
 key | value
 -----+-----------
-2 | test data
-2 | more data
+3 | test data
+3 | more data
 (2 rows)
-INSERT INTO select_test VALUES (2, 'even more data');
-SELECT * FROM select_test WHERE key = 2;
+INSERT INTO select_test VALUES (3, 'even more data');
+SELECT * FROM select_test WHERE key = 3;
+WARNING: connection error: localhost:9060
+DETAIL: server closed the connection unexpectedly
+This probably means the server terminated abnormally
+before or while processing the request.
 key | value
 -----+----------------
-2 | test data
-2 | more data
-2 | even more data
+3 | test data
+3 | more data
+3 | even more data
 (3 rows)
 COMMIT;
-WARNING: connection not open
-CONTEXT: while executing command on localhost:9060
-WARNING: connection not open
-CONTEXT: while executing command on localhost:9060
-WARNING: connection not open
-CONTEXT: while executing command on localhost:9060
 -- some clean up
 UPDATE pg_dist_shard_placement SET shardstate = 1
 WHERE shardid IN (
@@ -91,16 +89,16 @@ WHERE shardid IN (
 TRUNCATE select_test;
 -- now the same tests with query cancellation
 -- put data in shard for which mitm node is first placement
-INSERT INTO select_test VALUES (2, 'test data');
+INSERT INTO select_test VALUES (3, 'test data');
 SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')');
 mitmproxy
 -----------
 (1 row)
-SELECT * FROM select_test WHERE key = 2;
+SELECT * FROM select_test WHERE key = 3;
 ERROR: canceling statement due to user request
-SELECT * FROM select_test WHERE key = 2;
+SELECT * FROM select_test WHERE key = 3;
 ERROR: canceling statement due to user request
 -- cancel after first SELECT; txn should fail and nothing should be marked as invalid
 SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')');
@@ -110,8 +108,8 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pi
 (1 row)
 BEGIN;
-INSERT INTO select_test VALUES (2, 'more data');
-SELECT * FROM select_test WHERE key = 2;
+INSERT INTO select_test VALUES (3, 'more data');
+SELECT * FROM select_test WHERE key = 3;
 ERROR: canceling statement due to user request
 COMMIT;
 -- show that all placements are OK
@@ -134,15 +132,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_b
 (1 row)
 BEGIN;
-INSERT INTO select_test VALUES (2, 'more data');
-SELECT * FROM select_test WHERE key = 2;
+INSERT INTO select_test VALUES (3, 'more data');
+SELECT * FROM select_test WHERE key = 3;
 key | value
 -----+-----------
-2 | more data
+3 | more data
 (1 row)
-INSERT INTO select_test VALUES (2, 'even more data');
-SELECT * FROM select_test WHERE key = 2;
+INSERT INTO select_test VALUES (3, 'even more data');
+SELECT * FROM select_test WHERE key = 3;
 ERROR: canceling statement due to user request
 COMMIT;
 -- error after second SELECT; txn should work (though placement marked bad)
@@ -153,32 +151,26 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).reset()');
 (1 row)
 BEGIN;
-INSERT INTO select_test VALUES (2, 'more data');
-SELECT * FROM select_test WHERE key = 2;
+INSERT INTO select_test VALUES (3, 'more data');
+SELECT * FROM select_test WHERE key = 3;
 key | value
 -----+-----------
-2 | more data
+3 | more data
 (1 row)
-INSERT INTO select_test VALUES (2, 'even more data');
-SELECT * FROM select_test WHERE key = 2;
-WARNING: server closed the connection unexpectedly
+INSERT INTO select_test VALUES (3, 'even more data');
+SELECT * FROM select_test WHERE key = 3;
+WARNING: connection error: localhost:9060
+DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
-CONTEXT: while executing command on localhost:9060
 key | value
 -----+----------------
-2 | more data
-2 | even more data
+3 | more data
+3 | even more data
 (2 rows)
 COMMIT;
-WARNING: connection not open
-CONTEXT: while executing command on localhost:9060
-WARNING: connection not open
-CONTEXT: while executing command on localhost:9060
-WARNING: connection not open
-CONTEXT: while executing command on localhost:9060
 SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(2).kill()');
 mitmproxy
 -----------
@@ -223,11 +215,10 @@ SELECT * FROM select_test WHERE key = 1;
 (1 row)
 SELECT * FROM select_test WHERE key = 1;
-WARNING: server closed the connection unexpectedly
+ERROR: connection error: localhost:9060
+DETAIL: server closed the connection unexpectedly
 This probably means the server terminated abnormally
 before or while processing the request.
-CONTEXT: while executing command on localhost:9060
-ERROR: could not receive query results
 -- now the same test with query cancellation
 SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')');
 mitmproxy
@ -20,31 +20,31 @@ SELECT create_distributed_table('select_test', 'key');
(1 row) (1 row)
-- put data in shard for which mitm node is first placement -- put data in shard for which mitm node is first placement
INSERT INTO select_test VALUES (2, 'test data'); INSERT INTO select_test VALUES (3, 'test data');
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
mitmproxy mitmproxy
----------- -----------
(1 row) (1 row)
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:9060 WARNING: server closed the connection unexpectedly
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally This probably means the server terminated abnormally
before or while processing the request. before or while processing the request.
CONTEXT: while executing command on localhost:9060
key | value key | value
-----+----------- -----+-----------
2 | test data 3 | test data
(1 row) (1 row)
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:9060 WARNING: server closed the connection unexpectedly
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally This probably means the server terminated abnormally
before or while processing the request. before or while processing the request.
CONTEXT: while executing command on localhost:9060
key | value key | value
-----+----------- -----+-----------
2 | test data 3 | test data
(1 row) (1 row)
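Both SELECTs above still return the row because the shard has a second, healthy placement the coordinator falls back to after the first one fails. A sketch for inspecting that state through the standard metadata tables:
SELECT p.shardid, p.nodename, p.nodeport, p.shardstate
FROM pg_dist_shard_placement p
JOIN pg_dist_shard s ON p.shardid = s.shardid
WHERE s.logicalrelid = 'select_test'::regclass
ORDER BY p.shardid, p.nodeport;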
-- kill after first SELECT; txn should work (though placement marked bad) -- kill after first SELECT; txn should work (though placement marked bad)
@ -55,32 +55,34 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
(1 row) (1 row)
BEGIN; BEGIN;
INSERT INTO select_test VALUES (2, 'more data'); INSERT INTO select_test VALUES (3, 'more data');
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:9060 WARNING: server closed the connection unexpectedly
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally This probably means the server terminated abnormally
before or while processing the request. before or while processing the request.
CONTEXT: while executing command on localhost:9060
key | value key | value
-----+----------- -----+-----------
2 | test data 3 | test data
2 | more data 3 | more data
(2 rows) (2 rows)
INSERT INTO select_test VALUES (2, 'even more data'); INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:9060
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
key | value key | value
-----+---------------- -----+----------------
2 | test data 3 | test data
2 | more data 3 | more data
2 | even more data 3 | even more data
(3 rows) (3 rows)
COMMIT; COMMIT;
WARNING: connection not open
CONTEXT: while executing command on localhost:9060
WARNING: connection not open
CONTEXT: while executing command on localhost:9060
WARNING: connection not open
CONTEXT: while executing command on localhost:9060
-- some clean up -- some clean up
UPDATE pg_dist_shard_placement SET shardstate = 1 UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN ( WHERE shardid IN (
@ -89,16 +91,16 @@ WHERE shardid IN (
TRUNCATE select_test; TRUNCATE select_test;
-- now the same tests with query cancellation -- now the same tests with query cancellation
-- put data in shard for which mitm node is first placement -- put data in shard for which mitm node is first placement
INSERT INTO select_test VALUES (2, 'test data'); INSERT INTO select_test VALUES (3, 'test data');
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')');
mitmproxy mitmproxy
----------- -----------
(1 row) (1 row)
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
ERROR: canceling statement due to user request ERROR: canceling statement due to user request
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
ERROR: canceling statement due to user request ERROR: canceling statement due to user request
-- cancel after first SELECT; txn should fail and nothing should be marked as invalid -- cancel after first SELECT; txn should fail and nothing should be marked as invalid
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')');
@ -108,8 +110,8 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pi
(1 row) (1 row)
BEGIN; BEGIN;
INSERT INTO select_test VALUES (2, 'more data'); INSERT INTO select_test VALUES (3, 'more data');
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
ERROR: canceling statement due to user request ERROR: canceling statement due to user request
COMMIT; COMMIT;
-- show that all placements are OK -- show that all placements are OK
@ -132,15 +134,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_b
(1 row) (1 row)
BEGIN; BEGIN;
INSERT INTO select_test VALUES (2, 'more data'); INSERT INTO select_test VALUES (3, 'more data');
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
key | value key | value
-----+----------- -----+-----------
2 | more data 3 | more data
(1 row) (1 row)
INSERT INTO select_test VALUES (2, 'even more data'); INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
ERROR: canceling statement due to user request ERROR: canceling statement due to user request
COMMIT; COMMIT;
-- error after second SELECT; txn should work (though placement marked bad) -- error after second SELECT; txn should work (though placement marked bad)
@ -151,26 +153,32 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).reset()');
(1 row) (1 row)
BEGIN; BEGIN;
INSERT INTO select_test VALUES (2, 'more data'); INSERT INTO select_test VALUES (3, 'more data');
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
key | value key | value
-----+----------- -----+-----------
2 | more data 3 | more data
(1 row) (1 row)
INSERT INTO select_test VALUES (2, 'even more data'); INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 2; SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:9060 WARNING: server closed the connection unexpectedly
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally This probably means the server terminated abnormally
before or while processing the request. before or while processing the request.
CONTEXT: while executing command on localhost:9060
key | value key | value
-----+---------------- -----+----------------
2 | more data 3 | more data
2 | even more data 3 | even more data
(2 rows) (2 rows)
COMMIT; COMMIT;
WARNING: connection not open
CONTEXT: while executing command on localhost:9060
WARNING: connection not open
CONTEXT: while executing command on localhost:9060
WARNING: connection not open
CONTEXT: while executing command on localhost:9060
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(2).kill()'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(2).kill()');
mitmproxy mitmproxy
----------- -----------
@ -215,10 +223,11 @@ SELECT * FROM select_test WHERE key = 1;
(1 row) (1 row)
SELECT * FROM select_test WHERE key = 1; SELECT * FROM select_test WHERE key = 1;
ERROR: connection error: localhost:9060 WARNING: server closed the connection unexpectedly
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally This probably means the server terminated abnormally
before or while processing the request. before or while processing the request.
CONTEXT: while executing command on localhost:9060
ERROR: could not receive query results
-- now the same test with query cancellation -- now the same test with query cancellation
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')'); SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')');
mitmproxy mitmproxy
View File
@ -532,7 +532,7 @@ BEGIN;
DEBUG: switching to sequential query execution mode DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
COPY on_update_fkey_table FROM STDIN WITH CSV; COPY on_update_fkey_table FROM STDIN WITH CSV;
ERROR: insert or update on table "on_update_fkey_table_2380005" violates foreign key constraint "fkey_2380005" ERROR: insert or update on table "on_update_fkey_table_2380004" violates foreign key constraint "fkey_2380004"
DETAIL: Key (value_1)=(101) is not present in table "reference_table_2380001". DETAIL: Key (value_1)=(101) is not present in table "reference_table_2380001".
ROLLBACK; ROLLBACK;
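The constraint name in that error carries the shard id suffix (fkey_2380004 on on_update_fkey_table_2380004), so the expected output shifts when the failing row reaches a different shard placement first. A sketch for checking which shard a given distribution-column value (a hypothetical id of 101 here; value_1 in the DETAIL is not the distribution column) routes to, using Citus's shard-pruning helper:
SELECT get_shard_id_for_distribution_column('on_update_fkey_table', 101);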
-- case 2.8: UPDATE to a reference table is followed by TRUNCATE -- case 2.8: UPDATE to a reference table is followed by TRUNCATE
@ -817,6 +817,8 @@ ERROR: cannot execute DDL on reference relation "transitive_reference_table" be
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
-- case 4.5: SELECT to a dist table is followed by a TRUNCATE -- case 4.5: SELECT to a dist table is followed by a TRUNCATE
\set VERBOSITY terse
SET client_min_messages to LOG;
BEGIN; BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count count
@ -825,11 +827,8 @@ BEGIN;
(1 row) (1 row)
TRUNCATE reference_table CASCADE; TRUNCATE reference_table CASCADE;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: truncate cascades to table "on_update_fkey_table" NOTICE: truncate cascades to table "on_update_fkey_table"
ERROR: cannot execute DDL on reference relation "reference_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction ERROR: cannot execute DDL on reference relation "reference_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
@ -839,12 +838,9 @@ BEGIN;
(1 row) (1 row)
TRUNCATE transitive_reference_table CASCADE; TRUNCATE transitive_reference_table CASCADE;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: truncate cascades to table "reference_table" NOTICE: truncate cascades to table "reference_table"
NOTICE: truncate cascades to table "on_update_fkey_table" NOTICE: truncate cascades to table "on_update_fkey_table"
ERROR: cannot execute DDL on reference relation "transitive_reference_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction ERROR: cannot execute DDL on reference relation "transitive_reference_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
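The HINT above is directly actionable: forcing sequential shard modification before the read avoids the parallel-access conflict. A sketch, following the hint with the same statements:
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
TRUNCATE reference_table CASCADE;
ROLLBACK;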
-- case 4.6: Router SELECT to a dist table is followed by a TRUNCATE -- case 4.6: Router SELECT to a dist table is followed by a TRUNCATE
BEGIN; BEGIN;
@ -855,19 +851,7 @@ BEGIN;
(1 row) (1 row)
TRUNCATE reference_table CASCADE; TRUNCATE reference_table CASCADE;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: truncate cascades to table "on_update_fkey_table" NOTICE: truncate cascades to table "on_update_fkey_table"
DEBUG: truncate cascades to table "on_update_fkey_table_2380003"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380005"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380002"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380004"
DETAIL: NOTICE from localhost:57637
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE id = 9; SELECT count(*) FROM on_update_fkey_table WHERE id = 9;
@ -877,34 +861,11 @@ BEGIN;
(1 row) (1 row)
TRUNCATE transitive_reference_table CASCADE; TRUNCATE transitive_reference_table CASCADE;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: truncate cascades to table "reference_table" NOTICE: truncate cascades to table "reference_table"
NOTICE: truncate cascades to table "on_update_fkey_table" NOTICE: truncate cascades to table "on_update_fkey_table"
DEBUG: truncate cascades to table "reference_table_2380001"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380003"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380005"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "reference_table_2380001"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380002"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380004"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380002"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380003"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380004"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380005"
DETAIL: NOTICE from localhost:57638
DEBUG: building index "transitive_reference_table_pkey" on table "transitive_reference_table" serially
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK; ROLLBACK;
RESET client_min_messages;
\set VERBOSITY default
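Those two settings are what this change brackets the TRUNCATE cases with so the expected output stays stable across server versions: terse verbosity suppresses DETAIL/HINT/CONTEXT lines, and a LOG threshold hides the version-dependent DEBUG chatter (such as the "serially" suffix on index-build messages, which differs between the two expected files in this PR). The pattern, as used above:
\set VERBOSITY terse
SET client_min_messages to LOG;
-- ... statements whose DEBUG/DETAIL output varies by version ...
RESET client_min_messages;
\set VERBOSITY default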
-- case 5.1: Parallel UPDATE on distributed table followed by a SELECT -- case 5.1: Parallel UPDATE on distributed table followed by a SELECT
BEGIN; BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
@ -954,18 +915,12 @@ ROLLBACK;
BEGIN; BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
DEBUG: validating foreign key constraint "fkey"
ERROR: cannot execute DDL on reference relation "reference_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction ERROR: cannot execute DDL on reference relation "reference_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table"
DEBUG: building index "transitive_reference_table_pkey" on table "transitive_reference_table" serially
DEBUG: validating foreign key constraint "fkey"
ERROR: cannot execute DDL on reference relation "transitive_reference_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction ERROR: cannot execute DDL on reference relation "transitive_reference_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
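Same hint, same remedy as the TRUNCATE cases: making the DML sequential lets the reference-table DDL proceed. A sketch:
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
ROLLBACK;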
@ -985,16 +940,10 @@ ROLLBACK;
-- case 6.2: Related parallel DDL on distributed table followed by SELECT on ref. table -- case 6.2: Related parallel DDL on distributed table followed by SELECT on ref. table
BEGIN; BEGIN;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint; ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
UPDATE reference_table SET id = 160 WHERE id = 15; UPDATE reference_table SET id = 160 WHERE id = 15;
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint; ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
UPDATE transitive_reference_table SET id = 160 WHERE id = 15; UPDATE transitive_reference_table SET id = 160 WHERE id = 15;
ROLLBACK; ROLLBACK;
-- case 6.3: Unrelated parallel DDL on distributed table followed by UPDATE on ref. table -- case 6.3: Unrelated parallel DDL on distributed table followed by UPDATE on ref. table
@ -1090,26 +1039,14 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1123,39 +1060,21 @@ ROLLBACK;
-- already executed a parallel query -- already executed a parallel query
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4"
DEBUG: building index "tt4_pkey" on table "tt4" serially
SELECT create_distributed_table('tt4', 'id'); SELECT create_distributed_table('tt4', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table
DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
@ -1170,39 +1089,21 @@ ROLLBACK;
BEGIN; BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4"
DEBUG: building index "tt4_pkey" on table "tt4" serially
SELECT create_distributed_table('tt4', 'id'); SELECT create_distributed_table('tt4', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1218,26 +1119,14 @@ ROLLBACK;
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1258,26 +1147,14 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1293,26 +1170,14 @@ COMMIT;
-- changed -- changed
BEGIN; BEGIN;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
(1 row) (1 row)
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
@ -1332,26 +1197,14 @@ ROLLBACK;
BEGIN; BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
(1 row) (1 row)
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
@ -1374,13 +1227,7 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table
DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
@ -1401,32 +1248,17 @@ ROLLBACK;
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i; INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "test_table_1" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: Copying data from local table... NOTICE: Copying data from local table...
DEBUG: Copied 101 rows
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty
HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty. HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty.
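The HINT names the two escapes: stay in parallel mode, or distribute the tables while they are still empty and load afterwards. A sketch of the latter ordering, reusing this test's schema (the post-distribution loading step is an assumption, not part of this test):
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int REFERENCES test_table_1(id));
SELECT create_distributed_table('test_table_2', 'id');
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
COMMIT;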
-- make sure that the output isn't too verbose -- make sure that the output isn't too verbose
@ -1441,30 +1273,17 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i; INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
NOTICE: Copying data from local table... NOTICE: Copying data from local table...
DEBUG: Copied 101 rows
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty
HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty. HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty.
@ -1479,28 +1298,14 @@ COMMIT;
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "test_table_1" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1508,9 +1313,7 @@ DETAIL: NOTICE from localhost:57637
-- and maybe some other test -- and maybe some other test
CREATE INDEX i1 ON test_table_1(id); CREATE INDEX i1 ON test_table_1(id);
DEBUG: building index "i1" on table "test_table_1" serially
ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0); ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0);
DEBUG: verifying table "test_table_2"
SELECT count(*) FROM test_table_2; SELECT count(*) FROM test_table_2;
count count
------- -------
@ -1655,11 +1458,8 @@ DEBUG: Plan 184 query after replacing subqueries and CTEs: DELETE FROM test_fke
ROLLBACK; ROLLBACK;
RESET client_min_messages; RESET client_min_messages;
\set VERBOSITY terse
DROP SCHEMA test_fkey_to_ref_in_tx CASCADE; DROP SCHEMA test_fkey_to_ref_in_tx CASCADE;
NOTICE: drop cascades to 5 other objects NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table transitive_reference_table \set VERBOSITY default
drop cascades to table on_update_fkey_table
drop cascades to table unrelated_dist_table
drop cascades to table reference_table
drop cascades to table distributed_table
SET search_path TO public; SET search_path TO public;
View File
@ -817,6 +817,8 @@ ERROR: cannot execute DDL on reference relation "transitive_reference_table" be
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
-- case 4.5: SELECT to a dist table is followed by a TRUNCATE -- case 4.5: SELECT to a dist table is followed by a TRUNCATE
\set VERBOSITY terse
SET client_min_messages to LOG;
BEGIN; BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count count
@ -825,11 +827,8 @@ BEGIN;
(1 row) (1 row)
TRUNCATE reference_table CASCADE; TRUNCATE reference_table CASCADE;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: truncate cascades to table "on_update_fkey_table" NOTICE: truncate cascades to table "on_update_fkey_table"
ERROR: cannot execute DDL on reference relation "reference_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction ERROR: cannot execute DDL on reference relation "reference_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
@ -839,12 +838,9 @@ BEGIN;
(1 row) (1 row)
TRUNCATE transitive_reference_table CASCADE; TRUNCATE transitive_reference_table CASCADE;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: truncate cascades to table "reference_table" NOTICE: truncate cascades to table "reference_table"
NOTICE: truncate cascades to table "on_update_fkey_table" NOTICE: truncate cascades to table "on_update_fkey_table"
ERROR: cannot execute DDL on reference relation "transitive_reference_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction ERROR: cannot execute DDL on reference relation "transitive_reference_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
-- case 4.6: Router SELECT to a dist table is followed by a TRUNCATE -- case 4.6: Router SELECT to a dist table is followed by a TRUNCATE
BEGIN; BEGIN;
@ -855,19 +851,7 @@ BEGIN;
(1 row) (1 row)
TRUNCATE reference_table CASCADE; TRUNCATE reference_table CASCADE;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: truncate cascades to table "on_update_fkey_table" NOTICE: truncate cascades to table "on_update_fkey_table"
DEBUG: truncate cascades to table "on_update_fkey_table_2380003"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380005"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380002"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380004"
DETAIL: NOTICE from localhost:57637
DEBUG: building index "reference_table_pkey" on table "reference_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE id = 9; SELECT count(*) FROM on_update_fkey_table WHERE id = 9;
@ -877,34 +861,11 @@ BEGIN;
(1 row) (1 row)
TRUNCATE transitive_reference_table CASCADE; TRUNCATE transitive_reference_table CASCADE;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: truncate cascades to table "reference_table" NOTICE: truncate cascades to table "reference_table"
NOTICE: truncate cascades to table "on_update_fkey_table" NOTICE: truncate cascades to table "on_update_fkey_table"
DEBUG: truncate cascades to table "reference_table_2380001"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380003"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380005"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "reference_table_2380001"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380002"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380004"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380002"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380004"
DETAIL: NOTICE from localhost:57637
DEBUG: truncate cascades to table "on_update_fkey_table_2380003"
DETAIL: NOTICE from localhost:57638
DEBUG: truncate cascades to table "on_update_fkey_table_2380005"
DETAIL: NOTICE from localhost:57638
DEBUG: building index "transitive_reference_table_pkey" on table "transitive_reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
ROLLBACK; ROLLBACK;
RESET client_min_messages;
\set VERBOSITY default
-- case 5.1: Parallel UPDATE on distributed table followed by a SELECT -- case 5.1: Parallel UPDATE on distributed table followed by a SELECT
BEGIN; BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
@ -954,18 +915,12 @@ ROLLBACK;
BEGIN; BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table"
DEBUG: validating foreign key constraint "fkey"
ERROR: cannot execute DDL on reference relation "reference_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction ERROR: cannot execute DDL on reference relation "reference_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15; UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table"
DEBUG: building index "transitive_reference_table_pkey" on table "transitive_reference_table"
DEBUG: validating foreign key constraint "fkey"
ERROR: cannot execute DDL on reference relation "transitive_reference_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction ERROR: cannot execute DDL on reference relation "transitive_reference_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK; ROLLBACK;
@ -985,16 +940,10 @@ ROLLBACK;
-- case 6.2: Related parallel DDL on distributed table followed by SELECT on ref. table -- case 6.2: Related parallel DDL on distributed table followed by SELECT on ref. table
BEGIN; BEGIN;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint; ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: validating foreign key constraint "fkey"
UPDATE reference_table SET id = 160 WHERE id = 15; UPDATE reference_table SET id = 160 WHERE id = 15;
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint; ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table"
DEBUG: validating foreign key constraint "fkey"
UPDATE transitive_reference_table SET id = 160 WHERE id = 15; UPDATE transitive_reference_table SET id = 160 WHERE id = 15;
ROLLBACK; ROLLBACK;
-- case 6.3: Unrelated parallel DDL on distributed table followed by UPDATE on ref. table -- case 6.3: Unrelated parallel DDL on distributed table followed by UPDATE on ref. table
@ -1090,26 +1039,14 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1123,39 +1060,21 @@ ROLLBACK;
-- already executed a parallel query -- already executed a parallel query
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4"
DEBUG: building index "tt4_pkey" on table "tt4"
SELECT create_distributed_table('tt4', 'id'); SELECT create_distributed_table('tt4', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table
DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
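-- Illustration (not part of the test output; hypothetical table names): a
-- minimal sketch of the fix the HINT above suggests. The table with a foreign
-- key to a reference table is created in sequential mode, before any parallel
-- command runs in the transaction.
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE ref_table(id int PRIMARY KEY);
SELECT create_reference_table('ref_table');
CREATE TABLE dist_table(id int PRIMARY KEY, ref_id int REFERENCES ref_table(id));
SELECT create_distributed_table('dist_table', 'id');
COMMIT;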
@ -1170,39 +1089,21 @@ ROLLBACK;
BEGIN; BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4"
DEBUG: building index "tt4_pkey" on table "tt4"
SELECT create_distributed_table('tt4', 'id'); SELECT create_distributed_table('tt4', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1218,26 +1119,14 @@ ROLLBACK;
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1258,26 +1147,14 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1293,26 +1170,14 @@ COMMIT;
-- changed -- changed
BEGIN; BEGIN;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
(1 row) (1 row)
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
@ -1332,26 +1197,14 @@ ROLLBACK;
BEGIN; BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
(1 row) (1 row)
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_reference_table create_reference_table
------------------------ ------------------------
@ -1374,13 +1227,7 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table
DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
@ -1401,32 +1248,17 @@ ROLLBACK;
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i; INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "test_table_1" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
NOTICE: Copying data from local table... NOTICE: Copying data from local table...
DEBUG: Copied 101 rows
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty
HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty. HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty.
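-- Illustration (not part of the test output; hypothetical table names) of the
-- HINT above: in sequential mode, distribute the tables while they are still
-- empty and load the data afterwards.
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE ref_table(id int PRIMARY KEY);
SELECT create_reference_table('ref_table');
CREATE TABLE dist_table(id int PRIMARY KEY, value_1 int REFERENCES ref_table(id));
SELECT create_distributed_table('dist_table', 'id');
INSERT INTO ref_table SELECT i FROM generate_series(0,100) i;
INSERT INTO dist_table SELECT i, i FROM generate_series(0,100) i;
COMMIT;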
-- make sure that the output isn't too verbose -- make sure that the output isn't too verbose
@ -1441,30 +1273,17 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i; INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
NOTICE: Copying data from local table... NOTICE: Copying data from local table...
DEBUG: Copied 101 rows
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty
HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty. HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty.
@ -1479,28 +1298,14 @@ COMMIT;
BEGIN; BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
DEBUG: building index "test_table_1_pkey" on table "test_table_1"
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
DEBUG: building index "test_table_2_pkey" on table "test_table_2"
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "test_table_1" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
create_reference_table create_reference_table
------------------------ ------------------------
(1 row) (1 row)
SELECT create_distributed_table('test_table_2', 'id'); SELECT create_distributed_table('test_table_2', 'id');
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57638
DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
DETAIL: NOTICE from localhost:57637
create_distributed_table create_distributed_table
-------------------------- --------------------------
@ -1508,9 +1313,7 @@ DETAIL: NOTICE from localhost:57637
-- and maybe some other test -- and maybe some other test
CREATE INDEX i1 ON test_table_1(id); CREATE INDEX i1 ON test_table_1(id);
DEBUG: building index "i1" on table "test_table_1"
ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0); ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0);
DEBUG: verifying table "test_table_2"
SELECT count(*) FROM test_table_2; SELECT count(*) FROM test_table_2;
count count
------- -------
@ -1655,11 +1458,8 @@ DEBUG: Plan 184 query after replacing subqueries and CTEs: DELETE FROM test_fke
ROLLBACK; ROLLBACK;
RESET client_min_messages; RESET client_min_messages;
\set VERBOSITY terse
DROP SCHEMA test_fkey_to_ref_in_tx CASCADE; DROP SCHEMA test_fkey_to_ref_in_tx CASCADE;
NOTICE: drop cascades to 5 other objects NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table transitive_reference_table \set VERBOSITY default
drop cascades to table on_update_fkey_table
drop cascades to table unrelated_dist_table
drop cascades to table reference_table
drop cascades to table distributed_table
SET search_path TO public; SET search_path TO public;


@ -39,13 +39,9 @@ CREATE TABLE table_to_distribute (
json_data json, json_data json,
test_type_data dummy_type test_type_data dummy_type
); );
-- use the table WITH (OIDS) set
ALTER TABLE table_to_distribute SET WITH OIDS;
SELECT create_distributed_table('table_to_distribute', 'id', 'hash'); SELECT create_distributed_table('table_to_distribute', 'id', 'hash');
ERROR: cannot distribute relation: table_to_distribute ERROR: cannot create constraint on "table_to_distribute"
DETAIL: Distributed relations must not specify the WITH (OIDS) option in their definitions. DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE).
-- revert WITH (OIDS) from above
ALTER TABLE table_to_distribute SET WITHOUT OIDS;
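-- Illustration (not part of the test output; hypothetical table) of the
-- DETAIL above: UNIQUE, EXCLUDE and PRIMARY KEY constraints on a distributed
-- table must include the distribution column, so a primary key on the
-- partition column itself is accepted.
CREATE TABLE pk_on_dist_column(id bigint PRIMARY KEY, name text);
SELECT create_distributed_table('pk_on_dist_column', 'id');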
-- use an index instead of table name -- use an index instead of table name
SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash'); SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
ERROR: table_to_distribute_pkey is not a regular, foreign or partitioned table ERROR: table_to_distribute_pkey is not a regular, foreign or partitioned table


@ -10,13 +10,13 @@ SELECT part_storage_type, part_key, part_replica_count, part_max_size,
t | l_orderkey | 2 | 1536000 | 2 t | l_orderkey | 2 | 1536000 | 2
(1 row) (1 row)
SELECT * FROM master_get_table_ddl_events('lineitem'); SELECT * FROM master_get_table_ddl_events('lineitem') order by 1;
master_get_table_ddl_events master_get_table_ddl_events
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
CREATE TABLE public.lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL) ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber)
ALTER TABLE public.lineitem OWNER TO postgres ALTER TABLE public.lineitem OWNER TO postgres
CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) TABLESPACE pg_default CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) TABLESPACE pg_default
ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber) CREATE TABLE public.lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL)
(4 rows) (4 rows)
SELECT * FROM master_get_new_shardid(); SELECT * FROM master_get_new_shardid();


@ -26,12 +26,12 @@ SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s';
-- Show that, with no MX tables, metadata snapshot contains only the delete commands, -- Show that, with no MX tables, metadata snapshot contains only the delete commands,
-- pg_dist_node entries and reference tables -- pg_dist_node entries and reference tables
SELECT unnest(master_metadata_snapshot()); SELECT unnest(master_metadata_snapshot()) order by 1;
unnest unnest
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE TRUNCATE pg_dist_node CASCADE
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
(3 rows) (3 rows)
-- Create a test table with constraints and SERIAL -- Create a test table with constraints and SERIAL
@ -52,43 +52,43 @@ SELECT master_create_worker_shards('mx_test_table', 8, 1);
-- considered as an MX table -- considered as an MX table
UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass; UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass;
-- Show that the created MX table is included in the metadata snapshot -- Show that the created MX table is included in the metadata snapshot
SELECT unnest(master_metadata_snapshot()); SELECT unnest(master_metadata_snapshot()) order by 1;
unnest unnest
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE public.mx_test_table OWNER TO postgres
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE public.mx_test_table OWNER TO postgres ALTER TABLE public.mx_test_table OWNER TO postgres
ALTER TABLE public.mx_test_table OWNER TO postgres
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's') INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's')
SELECT worker_create_truncate_trigger('public.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
SELECT worker_create_truncate_trigger('public.mx_test_table')
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
(13 rows) (13 rows)
-- Show that CREATE INDEX commands are included in the metadata snapshot -- Show that CREATE INDEX commands are included in the metadata snapshot
CREATE INDEX mx_index ON mx_test_table(col_2); CREATE INDEX mx_index ON mx_test_table(col_2);
SELECT unnest(master_metadata_snapshot()); SELECT unnest(master_metadata_snapshot()) order by 1;
unnest unnest
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE public.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2) TABLESPACE pg_default
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE public.mx_test_table OWNER TO postgres ALTER TABLE public.mx_test_table OWNER TO postgres
ALTER TABLE public.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2) TABLESPACE pg_default
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's') INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's')
SELECT worker_create_truncate_trigger('public.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
SELECT worker_create_truncate_trigger('public.mx_test_table')
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
(14 rows) (14 rows)
-- Show that schema changes are included in the metadata snapshot -- Show that schema changes are included in the metadata snapshot
@ -96,23 +96,23 @@ CREATE SCHEMA mx_testing_schema;
ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema;
WARNING: not propagating ALTER ... SET SCHEMA commands to worker nodes WARNING: not propagating ALTER ... SET SCHEMA commands to worker nodes
HINT: Connect to worker nodes directly to manually change schemas of affected objects. HINT: Connect to worker nodes directly to manually change schemas of affected objects.
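-- Illustration of the HINT above (a hedged sketch, assuming the
-- run_command_on_workers() helper is available): replay the schema change on
-- each worker by hand, since ALTER ... SET SCHEMA is not propagated; shard
-- tables on the workers may need the same treatment.
SELECT run_command_on_workers('ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema');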
SELECT unnest(master_metadata_snapshot()); SELECT unnest(master_metadata_snapshot()) order by 1;
unnest unnest
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's') INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
(14 rows) (14 rows)
-- Show that append distributed tables are not included in the metadata snapshot -- Show that append distributed tables are not included in the metadata snapshot
@ -124,44 +124,44 @@ SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append');
(1 row) (1 row)
UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass;
SELECT unnest(master_metadata_snapshot()); SELECT unnest(master_metadata_snapshot()) order by 1;
unnest unnest
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's') INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
(14 rows) (14 rows)
-- Show that range distributed tables are not included in the metadata snapshot -- Show that range distributed tables are not included in the metadata snapshot
UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
SELECT unnest(master_metadata_snapshot()); SELECT unnest(master_metadata_snapshot()) order by 1;
unnest unnest
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default')
INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007)
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition
TRUNCATE pg_dist_node CASCADE
(14 rows)
-- Test start_metadata_sync_to_node UDF
View File
@ -4,12 +4,11 @@
SET citus.next_shard_id TO 1660000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
-- print major version number for version-specific tests
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int AS server_version;
SELECT substring(:'server_version', '\d+')::int > 10 AS server_version_above_ten;
 server_version
 server_version_above_ten
----------------
--------------------------
 11
 t
(1 row)
--
@ -540,7 +539,7 @@ CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id);
-- CREATE INDEX CONCURRENTLY on partition
CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id);
-- see index is created
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test%' ORDER BY indexname;
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test_%' ORDER BY indexname;
 tablename | indexname
---------------------------+----------------------------------
 partitioning_test_2010 | partitioned_2010_index
@ -571,7 +570,7 @@ CREATE TABLE non_distributed_partitioned_table_1 PARTITION OF non_distributed_pa
FOR VALUES FROM (0) TO (10);
CREATE INDEX non_distributed_partitioned_table_index ON non_distributed_partitioned_table(a);
-- see index is created
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed%' ORDER BY indexname;
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed_partitioned_table_%' ORDER BY indexname;
 tablename | indexname
-------------------------------------+-------------------------------------------
 non_distributed_partitioned_table_1 | non_distributed_partitioned_table_1_a_idx
View File
@ -4,12 +4,11 @@
SET citus.next_shard_id TO 1660000; SET citus.next_shard_id TO 1660000;
SET citus.shard_count TO 4; SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
-- print major version number for version-specific tests
SHOW server_version \gset SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int AS server_version; SELECT substring(:'server_version', '\d+')::int > 10 AS server_version_above_ten;
server_version server_version_above_ten
---------------- --------------------------
10 f
(1 row) (1 row)
-- --
@ -543,7 +542,7 @@ CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id);
-- CREATE INDEX CONCURRENTLY on partition
CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id);
-- see index is created
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test%' ORDER BY indexname;
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test_%' ORDER BY indexname;
 tablename | indexname
------------------------+-------------------------
 partitioning_test_2010 | partitioned_2010_index
@ -568,7 +567,7 @@ FOR VALUES FROM (0) TO (10);
CREATE INDEX non_distributed_partitioned_table_index ON non_distributed_partitioned_table(a);
ERROR: cannot create index on partitioned table "non_distributed_partitioned_table"
-- see index is created
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed%' ORDER BY indexname;
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed_partitioned_table_%' ORDER BY indexname;
 tablename | indexname
-----------+-----------
(0 rows)
View File
@ -4,12 +4,11 @@
SET citus.next_shard_id TO 1660000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
-- print major version number for version-specific tests
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int AS server_version;
SELECT substring(:'server_version', '\d+')::int > 10 AS server_version_above_ten;
 server_version
 server_version_above_ten
----------------
--------------------------
 11
 t
(1 row)
--
@ -420,7 +419,7 @@ SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1;
-- create default partition
CREATE TABLE partitioning_test_default PARTITION OF partitioning_test DEFAULT;
\d+ partitioning_test
 Table "public.partitioning_test"
 Partitioned table "public.partitioning_test"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
--------+---------+-----------+----------+---------+---------+--------------+-------------
 id | integer | | | | plain | |
@ -540,7 +539,7 @@ CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id);
-- CREATE INDEX CONCURRENTLY on partition
CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id);
-- see index is created
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test%' ORDER BY indexname;
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test_%' ORDER BY indexname;
 tablename | indexname
---------------------------+----------------------------------
 partitioning_test_2010 | partitioned_2010_index
@ -571,7 +570,7 @@ CREATE TABLE non_distributed_partitioned_table_1 PARTITION OF non_distributed_pa
FOR VALUES FROM (0) TO (10);
CREATE INDEX non_distributed_partitioned_table_index ON non_distributed_partitioned_table(a);
-- see index is created
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed%' ORDER BY indexname;
SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'non_distributed_partitioned_table_%' ORDER BY indexname;
 tablename | indexname
-------------------------------------+-------------------------------------------
 non_distributed_partitioned_table_1 | non_distributed_partitioned_table_1_a_idx
View File
@ -1382,10 +1382,6 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_sche
(0 rows)
\c - - - :master_port
-- as we expect, setting WITH OIDS does not work for reference tables
ALTER TABLE reference_schema.reference_table_ddl SET WITH OIDS;
ERROR: alter table command is currently unsupported
DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ATTACH|DETACH PARTITION and TYPE subcommands are supported.
-- now test the renaming of the table, and back to the expected name
ALTER TABLE reference_schema.reference_table_ddl RENAME TO reference_table_ddl_test;
ALTER TABLE reference_schema.reference_table_ddl_test RENAME TO reference_table_ddl;
View File
@ -87,7 +87,7 @@ DETAIL: Functions are not allowed in FROM clause when the query has subqueries
SELECT
user_id
FROM
(SELECT 5 AS user_id) users_reference_table
(SELECT 5 AS user_id UNION ALL SELECT 6) users_reference_table
WHERE
NOT EXISTS
(SELECT
@ -99,7 +99,7 @@ WHERE
)
LIMIT 3;
ERROR: cannot pushdown the subquery
DETAIL: Subqueries without FROM are not allowed in FROM clause when the outer query has subqueries in WHERE clause and it references a column from another query
DETAIL: Complex subqueries and CTEs are not allowed in the FROM clause when the query has subqueries in the WHERE clause and it references a column from another query
-- join with distributed table prevents FROM from recurring
SELECT
DISTINCT user_id
View File
@ -188,10 +188,9 @@ VIETNAM
RUSSIA
UNITED KINGDOM
UNITED STATES
-- Test that we can create on-commit drop tables, and also test creating with
-- oids, along with changing column names
-- Test that we can create on-commit drop tables, along with changing column names
BEGIN;
CREATE TEMP TABLE customer_few (customer_key) WITH (OIDS) ON COMMIT DROP AS
CREATE TEMP TABLE customer_few (customer_key) ON COMMIT DROP AS
(SELECT * FROM customer WHERE c_nationkey = 1 ORDER BY c_custkey LIMIT 10);
SELECT customer_key, c_name, c_address
FROM customer_few ORDER BY customer_key LIMIT 5;
View File
@ -0,0 +1,368 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
\gset
\if :server_version_above_eleven
\else
\q
\endif
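-- (Editorial note: this guard is the usual psql idiom for version-gated tests;
-- on servers at major version 11 or below, the \q above ends the script before
-- any PostgreSQL 12 syntax is parsed.)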
SET citus.shard_replication_factor to 1;
SET citus.next_shard_id TO 60000;
SET citus.next_placement_id TO 60000;
create schema test_pg12;
set search_path to test_pg12;
CREATE FUNCTION blackhole_am_handler(internal)
RETURNS table_am_handler
AS 'citus'
LANGUAGE C;
CREATE ACCESS METHOD blackhole_am TYPE TABLE HANDLER blackhole_am_handler;
create table test_am(id int, val int) using blackhole_am;
insert into test_am values (1, 1);
-- Custom table access methods should be rejected
select create_distributed_table('test_am','id');
ERROR: cannot distribute relations using non-heap access methods
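-- Illustrative sketch (not part of this diff; the table name is hypothetical):
-- the same definition with the default heap access method is accepted.
create table test_am_heap (id int, val int) using heap;
select create_distributed_table('test_am_heap', 'id');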
-- Test generated columns
create table gen1 (
id int,
val1 int,
val2 int GENERATED ALWAYS AS (val1 + 2) STORED
);
create table gen2 (
id int,
val1 int,
val2 int GENERATED ALWAYS AS (val1 + 2) STORED
);
insert into gen1 (id, val1) values (1,4),(3,6),(5,2),(7,2);
insert into gen2 (id, val1) values (1,4),(3,6),(5,2),(7,2);
select * from create_distributed_table('gen1', 'id');
ERROR: cannot distribute relation: gen1
DETAIL: Distributed relations must not use GENERATED ALWAYS AS (...) STORED.
select * from create_distributed_table('gen2', 'val2');
ERROR: cannot distribute relation: gen2
DETAIL: Distributed relations must not use GENERATED ALWAYS AS (...) STORED.
insert into gen1 (id, val1) values (2,4),(4,6),(6,2),(8,2);
insert into gen2 (id, val1) values (2,4),(4,6),(6,2),(8,2);
select * from gen1;
id | val1 | val2
----+------+------
1 | 4 | 6
3 | 6 | 8
5 | 2 | 4
7 | 2 | 4
2 | 4 | 6
4 | 6 | 8
6 | 2 | 4
8 | 2 | 4
(8 rows)
select * from gen2;
id | val1 | val2
----+------+------
1 | 4 | 6
3 | 6 | 8
5 | 2 | 4
7 | 2 | 4
2 | 4 | 6
4 | 6 | 8
6 | 2 | 4
8 | 2 | 4
(8 rows)
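-- Illustrative sketch (assumed PostgreSQL 12 behavior, not output from this
-- run): a GENERATED ... STORED column rejects direct writes.
insert into gen1 (id, val1, val2) values (9, 1, 99);
-- ERROR:  cannot insert into column "val2"
-- DETAIL:  Column "val2" is a generated column.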
-- Test new VACUUM/ANALYZE options
analyze (skip_locked) gen1;
vacuum (skip_locked) gen1;
vacuum (truncate 0) gen1;
vacuum (index_cleanup 1) gen1;
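-- Illustrative sketch (assumed, not from this run): the new options take
-- boolean arguments and can be combined in a single call.
vacuum (skip_locked true, index_cleanup false, truncate false) gen2;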
-- COPY FROM
create table cptest (id int, val int);
select create_distributed_table('cptest', 'id');
create_distributed_table
--------------------------
(1 row)
copy cptest from STDIN with csv where val < 4;
ERROR: Citus does not support COPY FROM with WHERE
1,6
2,3
3,2
4,9
5,4
\.
invalid command \.
select sum(id), sum(val) from cptest;
ERROR: syntax error at or near "1"
LINE 1: 1,6
^
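-- Illustrative sketch (assumed, not from this run; the table name is
-- hypothetical): on a plain local table, the PostgreSQL 12 WHERE clause
-- filters rows during COPY FROM.
create temp table cptest_local (id int, val int);
copy cptest_local from STDIN with csv where val < 4;
1,6
2,3
3,2
\.
select * from cptest_local;
-- expected: only (2,3) and (3,2) satisfy val < 4 and are kept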
-- CTE materialized/not materialized
CREATE TABLE single_hash_repartition_first (id int, sum int, avg float);
CREATE TABLE single_hash_repartition_second (id int primary key, sum int, avg float);
SELECT create_distributed_table('single_hash_repartition_first', 'id');
create_distributed_table
--------------------------
(1 row)
SELECT create_distributed_table('single_hash_repartition_second', 'id');
create_distributed_table
--------------------------
(1 row)
INSERT INTO single_hash_repartition_first
SELECT i, i * 3, i * 0.3
FROM generate_series(0, 100) i;
INSERT INTO single_hash_repartition_second
SELECT i * 2, i * 5, i * 0.6
FROM generate_series(0, 100) i;
-- a sample router query with NOT MATERIALIZED
-- which pushes down the filters to the CTE
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
WITH cte1 AS NOT MATERIALIZED
(
SELECT id
FROM single_hash_repartition_first t1
)
SELECT count(*)
FROM cte1, single_hash_repartition_second
WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45;
$Q$);
coordinator_plan
------------------------------
Custom Scan (Citus Adaptive)
Task Count: 1
(2 rows)
-- same query, without NOT MATERIALIZED, which is already default
-- which pushes down the filters to the CTE
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
WITH cte1 AS
(
SELECT id
FROM single_hash_repartition_first t1
)
SELECT count(*)
FROM cte1, single_hash_repartition_second
WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45;
$Q$);
coordinator_plan
------------------------------
Custom Scan (Citus Adaptive)
Task Count: 1
(2 rows)
-- same query with MATERIALIZED
-- which prevents pushing down filters to the CTE,
-- thus becoming a real-time query
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
WITH cte1 AS MATERIALIZED
(
SELECT id
FROM single_hash_repartition_first t1
)
SELECT count(*)
FROM cte1, single_hash_repartition_second
WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45;
$Q$);
coordinator_plan
------------------------------------------
Custom Scan (Citus Adaptive)
-> Distributed Subplan 5_1
-> Custom Scan (Citus Adaptive)
Task Count: 4
(4 rows)
-- similar query with MATERIALIZED
-- now manually have the same filter in the CTE
-- thus becoming a router query again
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
WITH cte1 AS MATERIALIZED
(
SELECT id
FROM single_hash_repartition_first t1
WHERE id = 45
)
SELECT count(*)
FROM cte1, single_hash_repartition_second
WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.id = 45;
$Q$);
coordinator_plan
------------------------------
Custom Scan (Citus Adaptive)
Task Count: 1
(2 rows)
-- now, have a real-time query without MATERIALIZED
-- these are sanity checks, because all of the CTEs are recursively
-- planned and there is no benefit that Citus can have
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
WITH cte1 AS MATERIALIZED
(
SELECT id
FROM single_hash_repartition_first t1
WHERE sum = 45
)
SELECT count(*)
FROM cte1, single_hash_repartition_second
WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.sum = 45;
$Q$);
coordinator_plan
------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
-> Distributed Subplan 8_1
-> Custom Scan (Citus Adaptive)
Task Count: 4
(5 rows)
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
WITH cte1 AS NOT MATERIALIZED
(
SELECT id
FROM single_hash_repartition_first t1
WHERE sum = 45
)
SELECT count(*)
FROM cte1, single_hash_repartition_second
WHERE cte1.id = single_hash_repartition_second.id AND single_hash_repartition_second.sum = 45;
$Q$);
coordinator_plan
------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
-> Distributed Subplan 10_1
-> Custom Scan (Citus Adaptive)
Task Count: 4
(5 rows)
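-- Aside (illustrative sketch, assumed behavior): before PostgreSQL 12's
-- MATERIALIZED keyword, a similar optimization fence was often forced with
-- the OFFSET 0 subquery idiom, which still acts as a fence here.
SELECT count(*)
FROM (SELECT id FROM single_hash_repartition_first OFFSET 0) fenced,
     single_hash_repartition_second
WHERE fenced.id = single_hash_repartition_second.id
  AND single_hash_repartition_second.id = 45;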
-- Foreign keys to partition tables
CREATE TABLE collections_list (
key bigint,
collection_id integer,
value numeric,
PRIMARY KEY(key, collection_id)
) PARTITION BY LIST (collection_id);
CREATE TABLE collections_list_0
PARTITION OF collections_list (key, collection_id, value)
FOR VALUES IN ( 0 );
CREATE TABLE collections_list_1
PARTITION OF collections_list (key, collection_id, value)
FOR VALUES IN ( 1 );
CREATE TABLE collection_users
(used_id integer, collection_id integer, key bigint);
ALTER TABLE collection_users
ADD CONSTRAINT collection_users_fkey FOREIGN KEY (key, collection_id) REFERENCES collections_list (key, collection_id);
-- sanity check for postgres
INSERT INTO collections_list VALUES (1, 0, '1.1');
INSERT INTO collection_users VALUES (1, 0, 1);
-- should fail because of fkey
INSERT INTO collection_users VALUES (1, 1000, 1);
ERROR: insert or update on table "collection_users" violates foreign key constraint "collection_users_fkey"
DETAIL: Key (key, collection_id)=(1, 1000) is not present in table "collections_list".
SELECT create_distributed_table('collections_list', 'key');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
SELECT create_distributed_table('collection_users', 'key');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
-- should still fail because of fkey
INSERT INTO collection_users VALUES (1, 1000, 1);
ERROR: insert or update on table "collection_users_60024" violates foreign key constraint "collection_users_fkey_60024"
DETAIL: Key (key, collection_id)=(1, 1000) is not present in table "collections_list_60012".
CONTEXT: while executing command on localhost:57637
-- whereas new record with partition should go through
INSERT INTO collections_list VALUES (2, 1, '1.2');
INSERT INTO collection_users VALUES (5, 1, 2);
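-- Illustrative sketch (assumed, not from this run): the constraint also guards
-- the delete path; the shard names in the distributed error message vary.
DELETE FROM collections_list WHERE key = 2 AND collection_id = 1;
-- expected to fail with a foreign key violation, since collection_users
-- still references (key, collection_id) = (2, 1)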
-- AND CHAIN
CREATE TABLE test (x int, y int);
INSERT INTO test (x,y) SELECT i,i*3 from generate_series(1, 100) i;
SELECT create_distributed_table('test', 'x');
NOTICE: Copying data from local table...
create_distributed_table
--------------------------
(1 row)
-- single shard queries with CHAIN
BEGIN;
UPDATE test SET y = 15 WHERE x = 1;
COMMIT AND CHAIN;
SELECT * FROM test WHERE x = 1;
x | y
---+----
1 | 15
(1 row)
COMMIT;
BEGIN;
UPDATE test SET y = 20 WHERE x = 1;
ROLLBACK AND CHAIN;
SELECT * FROM test WHERE x = 1;
x | y
---+----
1 | 15
(1 row)
COMMIT;
-- multi shard queries with CHAIN
BEGIN;
UPDATE test SET y = 25;
COMMIT AND CHAIN;
SELECT DISTINCT y FROM test;
y
----
25
(1 row)
COMMIT;
BEGIN;
UPDATE test SET y = 30;
ROLLBACK AND CHAIN;
SELECT DISTINCT y FROM test;
y
----
25
(1 row)
COMMIT;
-- does read only carry over?
BEGIN READ ONLY;
COMMIT AND CHAIN;
UPDATE test SET y = 35;
ERROR: cannot execute UPDATE in a read-only transaction
COMMIT;
SELECT DISTINCT y FROM test;
y
----
25
(1 row)
BEGIN READ ONLY;
ROLLBACK AND CHAIN;
UPDATE test SET y = 40;
ERROR: cannot execute UPDATE in a read-only transaction
COMMIT;
SELECT DISTINCT y FROM test;
y
----
25
(1 row)
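-- Illustrative sketch (assumed PostgreSQL 12 behavior, not output from this
-- run): AND CHAIN also carries over the isolation level, not just read-only.
BEGIN ISOLATION LEVEL REPEATABLE READ;
COMMIT AND CHAIN;
SHOW transaction_isolation;
-- expected: repeatable read
COMMIT;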
\set VERBOSITY terse
drop schema test_pg12 cascade;
NOTICE: drop cascades to 11 other objects
\set VERBOSITY default
SET citus.shard_replication_factor to 2;
View File
@ -0,0 +1,6 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
\gset
\if :server_version_above_eleven
\else
\q
View File
@ -98,7 +98,7 @@ $$;
CALL test_procedure_modify_insert(2,12);
ERROR: duplicate key value violates unique constraint "idx_table_100503"
DETAIL: Key (id, org_id)=(2, 12) already exists.
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)"
PL/pgSQL function test_procedure_modify_insert(integer,integer) line 5 at SQL statement
SELECT * FROM test_table ORDER BY 1, 2;
@ -118,7 +118,7 @@ $$;
CALL test_procedure_modify_insert_commit(2,30);
ERROR: duplicate key value violates unique constraint "idx_table_100503"
DETAIL: Key (id, org_id)=(2, 30) already exists.
CONTEXT: while executing command on localhost:57637
CONTEXT: while executing command on localhost:57638
SQL statement "INSERT INTO test_table VALUES (tt_id, tt_org_id)"
PL/pgSQL function test_procedure_modify_insert_commit(integer,integer) line 5 at SQL statement
SELECT * FROM test_table ORDER BY 1, 2;
@ -209,14 +209,8 @@ SELECT * from test_table;
----+--------
(0 rows)
\set VERBOSITY terse
DROP SCHEMA procedure_schema CASCADE;
NOTICE: drop cascades to 8 other objects
DETAIL: drop cascades to table test_table
\set VERBOSITY default
drop cascades to function test_procedure_delete_insert(integer,integer)
drop cascades to function test_procedure_modify_insert(integer,integer)
drop cascades to function test_procedure_modify_insert_commit(integer,integer)
drop cascades to function test_procedure_rollback_3(integer,integer)
drop cascades to function test_procedure_rollback(integer,integer)
drop cascades to function test_procedure_rollback_2(integer,integer)
drop cascades to function test_procedure(integer,integer)
RESET SEARCH_PATH;
View File
@ -277,6 +277,8 @@ SELECT * from test_table;
----+--------
(0 rows)
\set VERBOSITY terse
DROP SCHEMA procedure_schema CASCADE;
NOTICE: drop cascades to table test_table
\set VERBOSITY default
RESET SEARCH_PATH;
View File
@ -39,17 +39,18 @@ ERROR: cannot pushdown the subquery
DETAIL: Complex subqueries and CTEs are not allowed in the FROM clause when the query has subqueries in the WHERE clause and it references a column from another query
-- Recurring tuples as empty join tree
SELECT *
FROM (SELECT 1 AS id,
2 AS value_1,
3 AS value_3) AS tt1
FROM (SELECT 1 AS id, 2 AS value_1, 3 AS value_3
UNION ALL SELECT 2 as id, 3 as value_1, 4 as value_3) AS tt1
WHERE id IN (SELECT user_id
FROM events_table);
DEBUG: generating subplan 6_1 for subquery SELECT user_id FROM public.events_table
DEBUG: generating subplan 6_1 for subquery SELECT 1 AS id, 2 AS value_1, 3 AS value_3 UNION ALL SELECT 2 AS id, 3 AS value_1, 4 AS value_3
DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT id, value_1, value_3 FROM (SELECT 1 AS id, 2 AS value_1, 3 AS value_3) tt1 WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))
DEBUG: generating subplan 6_2 for subquery SELECT user_id FROM public.events_table
DEBUG: Plan 6 query after replacing subqueries and CTEs: SELECT id, value_1, value_3 FROM (SELECT intermediate_result.id, intermediate_result.value_1, intermediate_result.value_3 FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value_1 integer, value_3 integer)) tt1 WHERE (id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('6_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))
 id | value_1 | value_3
----+---------+---------
 1 | 2 | 3
(1 row)
 2 | 3 | 4
(2 rows)
-- Recurring tuples in from clause as CTE and SET operation in WHERE clause
SELECT Count(*)
Some files were not shown because too many files have changed in this diff