mirror of https://github.com/citusdata/citus.git
Support PostgreSQL 10 (#1379)
Adds support for PostgreSQL 10 by copying in the requisite ruleutils and updating all API usages to conform with changes in PostgreSQL 10. Most changes are fairly minor, but they are numerous. One particular obstacle was the change in \d behavior in PostgreSQL 10's psql; I had to add SQL implementations (views, mostly) to mimic the pre-10 output.

Branch: pull/1463/head
parent 71a4e90e82
commit 2204da19f0
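The diff below applies one recurring technique: compile-time version gates on PG_VERSION_NUM. As a minimal illustrative sketch (not code from this commit; the function name is ours), this is the shape of the pattern:

/* Minimal sketch of the version-gating pattern used throughout this commit.
 * PG_VERSION_NUM is defined by the PostgreSQL headers (e.g. 100000 for 10.0),
 * so one source tree can compile against 9.5, 9.6, and 10. */
#include "postgres.h"

static void
LogCompiledVersion(void)
{
#if (PG_VERSION_NUM >= 100000)
    elog(DEBUG1, "compiled against PostgreSQL 10 or later");
#else
    elog(DEBUG1, "compiled against PostgreSQL 9.x");
#endif
}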
@@ -30,4 +30,5 @@ src/backend/distributed/utils/citus_read.c -citus-style
 src/backend/distributed/utils/citus_readfuncs_95.c -citus-style
 src/backend/distributed/utils/ruleutils_95.c -citus-style
 src/backend/distributed/utils/ruleutils_96.c -citus-style
+src/backend/distributed/utils/ruleutils_10.c -citus-style
 src/include/distributed/citus_nodes.h -citus-style
@@ -16,6 +16,7 @@ env:
   matrix:
     - PGVERSION=9.5
     - PGVERSION=9.6
+    - PGVERSION=10
 before_install:
   - git clone -b v0.6.2 --depth 1 https://github.com/citusdata/tools.git
   - sudo make -C tools install
@@ -27,7 +28,7 @@ install:
   - install_pg
   - install_custom_pg
   # download and install HLL manually, as custom builds won't satisfy deps
-  - apt-get download "postgresql-${PGVERSION}-hll=2.10.1.citus-1" && sudo dpkg --force-confold --force-confdef --force-all -i *hll*.deb
+  - apt-get download "postgresql-${PGVERSION}-hll=2.10.2.citus-1" && sudo dpkg --force-confold --force-confdef --force-all -i *hll*.deb
 before_script: citus_indent --quiet --check
 script: CFLAGS=-Werror pg_travis_multi_test check
 after_success:
@@ -1996,13 +1996,16 @@ fi
 # check we're building against a supported version of PostgreSQL
 citusac_pg_config_version=$($PG_CONFIG --version 2>/dev/null)
 version_num=$(echo "$citusac_pg_config_version"|
-  $SED -e 's/^PostgreSQL \([0-9]*\)\.\([0-9]*\)\([a-zA-Z0-9.]*\)$/\1.\2/')
+  $SED -e 's/^PostgreSQL \([0-9]*\)\(\.[0-9]*\)\{0,1\}\(.*\)$/\1\2/')
+
+# if PostgreSQL version starts with two digits, the major version is those digits
+version_num=$(echo "$version_num"| $SED -e 's/^\([0-9]\{2\}\)\(.*\)$/\1/')
 
 if test -z "$version_num"; then
   as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5
 fi
 
-if test "$version_num" != '9.5' -a "$version_num" != '9.6'; then
+if test "$version_num" != '9.5' -a "$version_num" != '9.6' -a "$version_num" != '10'; then
   as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
 else
   { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5
@@ -50,13 +50,16 @@ fi
 # check we're building against a supported version of PostgreSQL
 citusac_pg_config_version=$($PG_CONFIG --version 2>/dev/null)
 version_num=$(echo "$citusac_pg_config_version"|
-  $SED -e 's/^PostgreSQL \([[0-9]]*\)\.\([[0-9]]*\)\([[a-zA-Z0-9.]]*\)$/\1.\2/')
+  $SED -e 's/^PostgreSQL \([[0-9]]*\)\(\.[[0-9]]*\)\{0,1\}\(.*\)$/\1\2/')
+
+# if PostgreSQL version starts with two digits, the major version is those digits
+version_num=$(echo "$version_num"| $SED -e 's/^\([[0-9]]\{2\}\)\(.*\)$/\1/')
 
 if test -z "$version_num"; then
   AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.])
 fi
 
-if test "$version_num" != '9.5' -a "$version_num" != '9.6'; then
+if test "$version_num" != '9.5' -a "$version_num" != '9.6' -a "$version_num" != '10'; then
  AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
 else
  AC_MSG_NOTICE([building against PostgreSQL $version_num])
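Both configure scripts encode the same rule: versions starting with two digits (10+) use just those digits as the major version, while 9.x keeps the "9.N" form. As a stand-alone illustration (a hypothetical helper of ours, not part of the commit), the parsing logic is:

#include <stdio.h>

/* "PostgreSQL 10.1" -> "10"; "PostgreSQL 9.6.3" -> "9.6" */
static void
ParseMajorVersion(const char *pgConfigOutput, char *major, size_t majorSize)
{
    int first = 0;
    int second = 0;

    if (sscanf(pgConfigOutput, "PostgreSQL %d.%d", &first, &second) < 1)
    {
        major[0] = '\0';    /* detection failed, like the empty version_num check */
        return;
    }

    if (first >= 10)
    {
        snprintf(major, majorSize, "%d", first);             /* two-digit major */
    }
    else
    {
        snprintf(major, majorSize, "%d.%d", first, second);  /* e.g. "9.6" */
    }
}

int
main(void)
{
    char major[8];

    ParseMajorVersion("PostgreSQL 10.1", major, sizeof(major));
    printf("%s\n", major);    /* prints "10" */

    ParseMajorVersion("PostgreSQL 9.6.3", major, sizeof(major));
    printf("%s\n", major);    /* prints "9.6" */
    return 0;
}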
@@ -84,6 +84,9 @@ static void CreateHashDistributedTable(Oid relationId, char *distributionColumnN
 static Oid ColumnType(Oid relationId, char *columnName);
 static void CopyLocalDataIntoShards(Oid relationId);
 static List * TupleDescColumnNameList(TupleDesc tupleDescriptor);
+#if (PG_VERSION_NUM >= 100000)
+static bool RelationUsesIdentityColumns(TupleDesc relationDesc);
+#endif
 
 /* exports for SQL callable functions */
 PG_FUNCTION_INFO_V1(master_create_distributed_table);
@@ -349,6 +352,23 @@ ConvertToDistributedTable(Oid relationId, char *distributionColumnName,
                           "foreign tables.")));
     }
 
+#if (PG_VERSION_NUM >= 100000)
+    if (relation->rd_rel->relispartition)
+    {
+        ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                        errmsg("cannot distribute relation: %s", relationName),
+                        errdetail("Distributing partition tables is unsupported.")));
+    }
+
+    if (RelationUsesIdentityColumns(relationDesc))
+    {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                        errmsg("cannot distribute relation: %s", relationName),
+                        errdetail("Distributed relations must not use GENERATED "
+                                  "... AS IDENTITY.")));
+    }
+#endif
+
     /* check that table is empty if that is required */
     if (requireEmpty && !LocalTableEmpty(relationId))
     {
@@ -874,3 +894,30 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
 
     return columnNameList;
 }
+
+
+/*
+ * RelationUsesIdentityColumns returns whether a given relation uses the SQL
+ * GENERATED ... AS IDENTITY features supported as of PostgreSQL 10.
+ */
+#if (PG_VERSION_NUM >= 100000)
+static bool
+RelationUsesIdentityColumns(TupleDesc relationDesc)
+{
+    int attributeIndex = 0;
+
+    for (attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++)
+    {
+        Form_pg_attribute attributeForm = relationDesc->attrs[attributeIndex];
+
+        if (attributeForm->attidentity != '\0')
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+
+#endif
@@ -356,11 +356,21 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
     dest->rStartup(dest, 0, tupleDescriptor);
 
     /* initialize copy state to read from COPY data source */
+#if (PG_VERSION_NUM >= 100000)
+    copyState = BeginCopyFrom(NULL,
+                              distributedRelation,
+                              copyStatement->filename,
+                              copyStatement->is_program,
+                              NULL,
+                              copyStatement->attlist,
+                              copyStatement->options);
+#else
     copyState = BeginCopyFrom(distributedRelation,
                               copyStatement->filename,
                               copyStatement->is_program,
                               copyStatement->attlist,
                               copyStatement->options);
+#endif
 
     /* set up callback to identify error line number */
     errorCallback.callback = CopyFromErrorCallback;
@@ -455,11 +465,21 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)
         (ShardConnections *) palloc0(sizeof(ShardConnections));
 
     /* initialize copy state to read from COPY data source */
+#if (PG_VERSION_NUM >= 100000)
+    CopyState copyState = BeginCopyFrom(NULL,
+                                        distributedRelation,
+                                        copyStatement->filename,
+                                        copyStatement->is_program,
+                                        NULL,
+                                        copyStatement->attlist,
+                                        copyStatement->options);
+#else
     CopyState copyState = BeginCopyFrom(distributedRelation,
                                         copyStatement->filename,
                                         copyStatement->is_program,
                                         copyStatement->attlist,
                                         copyStatement->options);
+#endif
 
     CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
     copyOutState->delim = (char *) delimiterCharacter;
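The two BeginCopyFrom() call sites above differ only in the ParseState and data-source-callback arguments PostgreSQL 10 added (both passed as NULL here). A hedged sketch of an alternative — a wrapper macro of our own, not something this commit does — would keep call sites version-free:

/* Sketch (macro name is ours): version-neutral BeginCopyFrom wrapper,
 * matching how the hunks above invoke it with NULL pstate/callback. */
#include "postgres.h"
#include "commands/copy.h"

#if (PG_VERSION_NUM >= 100000)
#define CitusBeginCopyFrom(rel, filename, isProgram, attlist, options) \
    BeginCopyFrom(NULL, (rel), (filename), (isProgram), NULL, (attlist), (options))
#else
#define CitusBeginCopyFrom(rel, filename, isProgram, attlist, options) \
    BeginCopyFrom((rel), (filename), (isProgram), (attlist), (options))
#endif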
@@ -9,6 +9,7 @@
 
 #include "postgres.h"
 #include "miscadmin.h"
+#include "pgstat.h"
 
 #include <fcntl.h>
 #include <sys/stat.h>
@@ -54,7 +55,13 @@ RedirectCopyDataToRegularFile(const char *filename)
     /* if received data has contents, append to regular file */
     if (copyData->len > 0)
     {
+#if (PG_VERSION_NUM >= 100000)
+        int appended = FileWrite(fileDesc, copyData->data, copyData->len,
+                                 PG_WAIT_IO);
+#else
         int appended = FileWrite(fileDesc, copyData->data, copyData->len);
+#endif
 
         if (appended != copyData->len)
         {
             ereport(ERROR, (errcode_for_file_access(),
@@ -98,7 +105,12 @@ SendRegularFile(const char *filename)
 
     SendCopyOutStart();
 
+#if (PG_VERSION_NUM >= 100000)
+    readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize, PG_WAIT_IO);
+#else
     readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize);
+#endif
 
     while (readBytes > 0)
     {
         fileBuffer->len = readBytes;
@@ -106,7 +118,12 @@ SendRegularFile(const char *filename)
         SendCopyData(fileBuffer);
 
         resetStringInfo(fileBuffer);
+#if (PG_VERSION_NUM >= 100000)
+        readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize,
+                             PG_WAIT_IO);
+#else
         readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize);
+#endif
     }
 
     SendCopyDone();
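PostgreSQL 10 added a wait-event argument to FileRead()/FileWrite() so file I/O is attributed in pg_stat_activity; pre-10 builds simply omit it. A sketch of a shim that would collapse the repeated #if blocks above (macro names are ours, an assumption rather than the commit's approach):

/* Sketch: version-neutral file I/O wrappers reporting PG_WAIT_IO on 10+. */
#include "postgres.h"
#include "storage/fd.h"
#if (PG_VERSION_NUM >= 100000)
#include "pgstat.h"

#define CitusFileRead(file, buf, amount) FileRead((file), (buf), (amount), PG_WAIT_IO)
#define CitusFileWrite(file, buf, amount) FileWrite((file), (buf), (amount), PG_WAIT_IO)
#else
#define CitusFileRead(file, buf, amount) FileRead((file), (buf), (amount))
#define CitusFileWrite(file, buf, amount) FileWrite((file), (buf), (amount))
#endif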
@@ -9,6 +9,7 @@
 */
 
 #include "postgres.h"
+#include "pgstat.h"
 
 #include "libpq-fe.h"
 
@@ -434,7 +435,12 @@ GetRemoteCommandResult(MultiConnection *connection, bool raiseInterrupts)
         /* this means we have to wait for data to go out */
         Assert(rc == 1);
 
+#if (PG_VERSION_NUM >= 100000)
+        rc = WaitLatchOrSocket(MyLatch, waitFlags | WL_SOCKET_WRITEABLE, socket, 0,
+                               PG_WAIT_EXTENSION);
+#else
         rc = WaitLatchOrSocket(MyLatch, waitFlags | WL_SOCKET_WRITEABLE, socket, 0);
+#endif
 
         if (rc & WL_POSTMASTER_DEATH)
         {
@@ -484,7 +490,12 @@ GetRemoteCommandResult(MultiConnection *connection, bool raiseInterrupts)
             break;
         }
 
+#if (PG_VERSION_NUM >= 100000)
+        rc = WaitLatchOrSocket(MyLatch, waitFlags | WL_SOCKET_READABLE, socket, 0,
+                               PG_WAIT_EXTENSION);
+#else
         rc = WaitLatchOrSocket(MyLatch, waitFlags | WL_SOCKET_READABLE, socket, 0);
+#endif
 
         if (rc & WL_POSTMASTER_DEATH)
         {
@@ -152,6 +152,10 @@ ExecuteIntoDestReceiver(Query *query, ParamListInfo params, DestReceiver *dest)
                           NULL);
 
     PortalStart(portal, params, eflags, GetActiveSnapshot());
+#if (PG_VERSION_NUM >= 100000)
+    PortalRun(portal, count, false, true, dest, dest, NULL);
+#else
     PortalRun(portal, count, false, dest, dest, NULL);
+#endif
     PortalDrop(portal, false);
 }
@@ -320,7 +320,15 @@ LoadTuplesIntoTupleStore(CitusScanState *citusScanState, Job *workerJob)
 
     if (BinaryMasterCopyFormat)
     {
-        DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary"));
+        DefElem *copyOption = NULL;
+
+#if (PG_VERSION_NUM >= 100000)
+        int location = -1; /* "unknown" token location */
+        copyOption = makeDefElem("format", (Node *) makeString("binary"), location);
+#else
+        copyOption = makeDefElem("format", (Node *) makeString("binary"));
+#endif
+
         copyOptions = lappend(copyOptions, copyOption);
     }
 
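PostgreSQL 10's makeDefElem() takes a parse location for better error cursors; -1 means "unknown", as in the hunk above. A minimal sketch of a compatibility macro (the name is ours, an assumption):

/* Sketch: two-argument makeDefElem() on every supported version. */
#include "postgres.h"
#include "nodes/makefuncs.h"

#if (PG_VERSION_NUM >= 100000)
#define CitusMakeDefElem(name, arg) makeDefElem((name), (arg), -1)
#else
#define CitusMakeDefElem(name, arg) makeDefElem((name), (arg))
#endif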
@@ -334,8 +342,13 @@ LoadTuplesIntoTupleStore(CitusScanState *citusScanState, Job *workerJob)
         jobDirectoryName = MasterJobDirectoryName(workerTask->jobId);
         taskFilename = TaskFilename(jobDirectoryName, workerTask->taskId);
 
+#if (PG_VERSION_NUM >= 100000)
+        copyState = BeginCopyFrom(NULL, stubRelation, taskFilename->data, false, NULL,
+                                  NULL, copyOptions);
+#else
         copyState = BeginCopyFrom(stubRelation, taskFilename->data, false, NULL,
                                   copyOptions);
+#endif
 
         while (true)
         {
@@ -163,6 +163,49 @@ static void PostProcessUtility(Node *parsetree);
 static bool warnedUserAbout2PC = false;
 
 
+/*
+ * multi_ProcessUtility9x is the 9.x-compatible wrapper for Citus' main utility
+ * hook. It simply adapts the old-style hook to call into the new-style (10+)
+ * hook, which is what now houses all actual logic.
+ */
+void
+multi_ProcessUtility9x(Node *parsetree,
+                       const char *queryString,
+                       ProcessUtilityContext context,
+                       ParamListInfo params,
+                       DestReceiver *dest,
+                       char *completionTag)
+{
+    PlannedStmt *plannedStmt = makeNode(PlannedStmt);
+    plannedStmt->commandType = CMD_UTILITY;
+    plannedStmt->utilityStmt = parsetree;
+
+    multi_ProcessUtility(plannedStmt, queryString, context, params, NULL, dest,
+                         completionTag);
+}
+
+
+/*
+ * CitusProcessUtility is a version-aware wrapper of ProcessUtility to account
+ * for argument differences between the 9.x and 10+ PostgreSQL versions.
+ */
+void
+CitusProcessUtility(Node *node, const char *queryString, ProcessUtilityContext context,
+                    ParamListInfo params, DestReceiver *dest, char *completionTag)
+{
+#if (PG_VERSION_NUM >= 100000)
+    PlannedStmt *plannedStmt = makeNode(PlannedStmt);
+    plannedStmt->commandType = CMD_UTILITY;
+    plannedStmt->utilityStmt = node;
+
+    ProcessUtility(plannedStmt, queryString, context, params, NULL, dest,
+                   completionTag);
+#else
+    ProcessUtility(node, queryString, context, params, dest, completionTag);
+#endif
+}
+
+
 /*
  * multi_ProcessUtility is the main entry hook for implementing Citus-specific
  * utility behavior. Its primary responsibilities are intercepting COPY and DDL
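With the 9.x adapter above, hook installation can then pick the signature that matches the server version. The wiring below is a sketch under assumptions — this hunk does not show _PG_init(), and the assumed header name is ours:

/* Sketch (assumption): installing the version-appropriate utility hook. */
#include "postgres.h"
#include "tcop/utility.h"
#include "distributed/multi_utility.h" /* assumed header declaring both hooks */

void _PG_init(void);

void
_PG_init(void)
{
#if (PG_VERSION_NUM >= 100000)
    ProcessUtility_hook = multi_ProcessUtility;      /* PlannedStmt-based hook */
#else
    ProcessUtility_hook = multi_ProcessUtility9x;    /* Node-based 9.x adapter */
#endif
}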
@@ -173,13 +216,15 @@ static bool warnedUserAbout2PC = false;
  * TRUNCATE and VACUUM are also supported.
  */
 void
-multi_ProcessUtility(Node *parsetree,
+multi_ProcessUtility(PlannedStmt *pstmt,
                      const char *queryString,
                      ProcessUtilityContext context,
                      ParamListInfo params,
+                     struct QueryEnvironment *queryEnv,
                      DestReceiver *dest,
                      char *completionTag)
 {
+    Node *parsetree = pstmt->utilityStmt;
     bool commandMustRunAsOwner = false;
     Oid savedUserId = InvalidOid;
     int savedSecurityContext = 0;
@@ -194,8 +239,13 @@ multi_ProcessUtility(Node *parsetree,
      * that state. Since we never need to intercept transaction statements,
      * skip our checks and immediately fall into standard_ProcessUtility.
      */
+#if (PG_VERSION_NUM >= 100000)
+        standard_ProcessUtility(pstmt, queryString, context,
+                                params, queryEnv, dest, completionTag);
+#else
         standard_ProcessUtility(parsetree, queryString, context,
                                 params, dest, completionTag);
+#endif
 
         return;
     }
@@ -213,8 +263,13 @@
      * Ensure that utility commands do not behave any differently until CREATE
      * EXTENSION is invoked.
      */
+#if (PG_VERSION_NUM >= 100000)
+        standard_ProcessUtility(pstmt, queryString, context,
+                                params, queryEnv, dest, completionTag);
+#else
         standard_ProcessUtility(parsetree, queryString, context,
                                 params, dest, completionTag);
+#endif
 
         return;
     }
@@ -280,9 +335,14 @@
     {
         if (IsA(parsetree, IndexStmt))
         {
+            MemoryContext oldContext = MemoryContextSwitchTo(GetMemoryChunkContext(
+                                                                 parsetree));
+
             /* copy parse tree since we might scribble on it to fix the schema name */
             parsetree = copyObject(parsetree);
 
+            MemoryContextSwitchTo(oldContext);
+
             ddlJobs = PlanIndexStmt((IndexStmt *) parsetree, queryString);
         }
 
@@ -392,8 +452,14 @@
         SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);
     }
 
+#if (PG_VERSION_NUM >= 100000)
+    pstmt->utilityStmt = parsetree;
+    standard_ProcessUtility(pstmt, queryString, context,
+                            params, queryEnv, dest, completionTag);
+#else
     standard_ProcessUtility(parsetree, queryString, context,
                             params, dest, completionTag);
+#endif
 
     /* don't run post-process code for local commands */
     if (ddlJobs != NIL)
@@ -593,6 +659,8 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, bool *commandMustR
     {
         bool isFrom = copyStatement->is_from;
         Relation copiedRelation = NULL;
+        char *schemaName = NULL;
+        MemoryContext relationContext = NULL;
 
         /* consider using RangeVarGetRelidExtended to check perms before locking */
         copiedRelation = heap_openrv(copyStatement->relation,
@@ -601,8 +669,12 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, bool *commandMustR
         isDistributedRelation = IsDistributedTable(RelationGetRelid(copiedRelation));
 
         /* ensure future lookups hit the same relation */
-        copyStatement->relation->schemaname = get_namespace_name(
-            RelationGetNamespace(copiedRelation));
+        schemaName = get_namespace_name(RelationGetNamespace(copiedRelation));
+
+        /* ensure we copy string into proper context */
+        relationContext = GetMemoryChunkContext(copyStatement->relation);
+        schemaName = MemoryContextStrdup(relationContext, schemaName);
+        copyStatement->relation->schemaname = schemaName;
 
         heap_close(copiedRelation, NoLock);
     }
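This hunk (and the PlanIndexStmt one after it) fixes a lifetime bug: a string hung off a parse-tree node must be allocated in the node's own memory context, or it can be freed while the node still references it. As a minimal sketch of the pattern (the helper name is ours):

/* Hypothetical helper: copy a string into the context owning a node. */
#include "postgres.h"
#include "utils/memutils.h"

static char *
StrdupIntoNodeContext(void *parseTreeNode, const char *value)
{
    MemoryContext nodeContext = GetMemoryChunkContext(parseTreeNode);

    return MemoryContextStrdup(nodeContext, value);
}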
@@ -723,6 +795,7 @@ PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand)
     bool isDistributedRelation = false;
     char *namespaceName = NULL;
     LOCKMODE lockmode = ShareLock;
+    MemoryContext relationContext = NULL;
 
     /*
      * We don't support concurrently creating indexes for distributed
@@ -753,6 +826,10 @@
      * search path by the time postgres starts processing this statement.
      */
     namespaceName = get_namespace_name(RelationGetNamespace(relation));
+
+    /* ensure we copy string into proper context */
+    relationContext = GetMemoryChunkContext(createIndexStatement->relation);
+    namespaceName = MemoryContextStrdup(relationContext, namespaceName);
     createIndexStatement->relation->schemaname = namespaceName;
 
     heap_close(relation, NoLock);
@@ -1506,7 +1583,7 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement)
     /* caller uses ShareLock for non-concurrent indexes, use the same lock here */
     LOCKMODE lockMode = ShareLock;
     Oid relationId = RangeVarGetRelid(relation, lockMode, missingOk);
-    Var *partitionKey = PartitionKey(relationId);
+    Var *partitionKey = DistPartitionKey(relationId);
     char partitionMethod = PartitionMethod(relationId);
     List *indexParameterList = NIL;
     ListCell *indexParameterCell = NULL;
@@ -1653,7 +1730,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
                     continue;
                 }
 
-                partitionColumn = PartitionKey(relationId);
+                partitionColumn = DistPartitionKey(relationId);
 
                 tuple = SearchSysCacheAttName(relationId, alterColumnName);
                 if (HeapTupleIsValid(tuple))
@@ -1737,7 +1814,7 @@ ErrorIfUnsupportedAlterAddConstraintStmt(AlterTableStmt *alterTableStatement)
     LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds);
     Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode);
     char distributionMethod = PartitionMethod(relationId);
-    Var *distributionColumn = PartitionKey(relationId);
+    Var *distributionColumn = DistPartitionKey(relationId);
     uint32 colocationId = TableColocationId(relationId);
     Relation relation = relation_open(relationId, ExclusiveLock);
 
@@ -2010,7 +2087,7 @@ ErrorIfUnsupportedForeignConstraint(Relation relation, char distributionMethod,
          * Partition column must exist in both referencing and referenced side of the
          * foreign key constraint. They also must be in same ordinal.
          */
-        referencedTablePartitionColumn = PartitionKey(referencedTableId);
+        referencedTablePartitionColumn = DistPartitionKey(referencedTableId);
     }
     else
     {
@@ -2141,6 +2218,7 @@ ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt)
 {
     Oid sequenceId = RangeVarGetRelid(alterSeqStmt->sequence, AccessShareLock,
                                       alterSeqStmt->missing_ok);
+    bool sequenceOwned = false;
     Oid ownedByTableId = InvalidOid;
     Oid newOwnedByTableId = InvalidOid;
     int32 ownedByColumnId = 0;
@@ -2152,8 +2230,20 @@ ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt)
         return;
     }
 
-    /* see whether the sequences is already owned by a distributed table */
-    if (sequenceIsOwned(sequenceId, &ownedByTableId, &ownedByColumnId))
+#if (PG_VERSION_NUM >= 100000)
+    sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_AUTO, &ownedByTableId,
+                                    &ownedByColumnId);
+    if (!sequenceOwned)
+    {
+        sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_INTERNAL, &ownedByTableId,
+                                        &ownedByColumnId);
+    }
+#else
+    sequenceOwned = sequenceIsOwned(sequenceId, &ownedByTableId, &ownedByColumnId);
+#endif
+
+    /* see whether the sequence is already owned by a distributed table */
+    if (sequenceOwned)
     {
         hasDistributedOwner = IsDistributedTable(ownedByTableId);
     }
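PostgreSQL 10 splits sequence ownership into two dependency kinds — DEPENDENCY_AUTO for OWNED BY/serial columns and DEPENDENCY_INTERNAL for identity columns — so both must be probed, while 9.x has a single form. A sketch of a wrapper capturing the pattern used here and again later in the diff (the helper name is ours):

/* Sketch: version-neutral "is this sequence owned by a column?" probe. */
#include "postgres.h"
#include "catalog/dependency.h"
#include "commands/sequence.h"

static bool
CitusSequenceIsOwned(Oid sequenceId, Oid *tableId, int32 *columnId)
{
#if (PG_VERSION_NUM >= 100000)
    bool owned = sequenceIsOwned(sequenceId, DEPENDENCY_AUTO, tableId, columnId);

    if (!owned)
    {
        owned = sequenceIsOwned(sequenceId, DEPENDENCY_INTERNAL, tableId, columnId);
    }

    return owned;
#else
    return sequenceIsOwned(sequenceId, tableId, columnId);
#endif
}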
@@ -2350,8 +2440,9 @@ CreateLocalTable(RangeVar *relation, char *nodeName, int32 nodePort)
         /* run only a selected set of DDL commands */
         if (applyDDLCommand)
         {
-            ProcessUtility(ddlCommandNode, CreateCommandTag(ddlCommandNode),
-                           PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
+            CitusProcessUtility(ddlCommandNode, CreateCommandTag(ddlCommandNode),
+                                PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
 
             CommandCounterIncrement();
         }
     }
@@ -2984,8 +3075,7 @@ PostProcessUtility(Node *parsetree)
     indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
     indexForm->indisvalid = true;
 
-    simple_heap_update(pg_index, &indexTuple->t_self, indexTuple);
-    CatalogUpdateIndexes(pg_index, indexTuple);
+    CatalogTupleUpdate(pg_index, &indexTuple->t_self, indexTuple);
 
     /* clean up; index now marked valid, but ROLLBACK will mark invalid */
     heap_freetuple(indexTuple);
@@ -109,11 +109,16 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
     LOCKMODE lockMode = 0;
     char partitionMethod = 0;
     bool failOK = false;
+#if (PG_VERSION_NUM >= 100000)
+    RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);
+    queryTreeNode = rawStmt->stmt;
+#else
+    queryTreeNode = ParseTreeNode(queryString);
+#endif
 
     EnsureCoordinator();
     CheckCitusVersion(ERROR);
 
-    queryTreeNode = ParseTreeNode(queryString);
     if (!IsA(queryTreeNode, DeleteStmt))
     {
         ereport(ERROR, (errmsg("query \"%s\" is not a delete statement",
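In PostgreSQL 10 the raw parser returns RawStmt wrappers, and pg_analyze_and_rewrite() consumes the RawStmt, so callers peel ->stmt off for IsA() checks as this hunk does. A sketch of the shape of a unified helper — ParseTreeRawStmt()/ParseTreeNode() are Citus helpers visible in the hunk above, but the wrapper, its name, and the assumed header are ours:

/* Sketch (assumption): return the bare statement plus the node to analyze. */
#include "postgres.h"
#include "nodes/parsenodes.h"
#include "distributed/citus_ruleutils.h" /* assumed header for ParseTree* helpers */

static Node *
UtilityStatementFromQueryString(const char *queryString, Node **analyzableParseTree)
{
#if (PG_VERSION_NUM >= 100000)
    RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);

    *analyzableParseTree = (Node *) rawStmt; /* feed to pg_analyze_and_rewrite() */
    return rawStmt->stmt;
#else
    Node *parseTreeNode = ParseTreeNode(queryString);

    *analyzableParseTree = parseTreeNode;
    return parseTreeNode;
#endif
}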
@@ -144,7 +149,11 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
     CheckDistributedTable(relationId);
     EnsureTablePermissions(relationId, ACL_DELETE);
 
+#if (PG_VERSION_NUM >= 100000)
+    queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
+#else
     queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0);
+#endif
     deleteQuery = (Query *) linitial(queryTreeList);
     CheckTableCount(deleteQuery);
 
@@ -490,7 +499,7 @@ CheckDeleteCriteria(Node *deleteCriteria)
 static void
 CheckPartitionColumn(Oid relationId, Node *whereClause)
 {
-    Var *partitionColumn = PartitionKey(relationId);
+    Var *partitionColumn = DistPartitionKey(relationId);
     ListCell *columnCell = NULL;
 
     List *columnList = pull_var_clause_default(whereClause);
@@ -558,7 +567,11 @@ ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList,
         restrictInfoList = lappend(restrictInfoList, lessThanRestrictInfo);
         restrictInfoList = lappend(restrictInfoList, greaterThanRestrictInfo);
 
+#if (PG_VERSION_NUM >= 100000)
+        dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList, false);
+#else
         dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList);
+#endif
         if (dropShard)
         {
             dropShardIntervalList = lappend(dropShardIntervalList, shardInterval);
@@ -800,8 +800,7 @@ InsertShardRow(Oid relationId, uint64 shardId, char storageType,
     tupleDescriptor = RelationGetDescr(pgDistShard);
     heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
 
-    simple_heap_insert(pgDistShard, heapTuple);
-    CatalogUpdateIndexes(pgDistShard, heapTuple);
+    CatalogTupleInsert(pgDistShard, heapTuple);
 
     /* invalidate previous cache entry and close relation */
     CitusInvalidateRelcacheByRelid(relationId);
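This and the following catalog hunks adopt PostgreSQL 10's CatalogTupleInsert()/CatalogTupleUpdate(), which fold the simple_heap_* call and CatalogUpdateIndexes() into one helper. A sketch of a backward shim for 9.x builds (a hedged alternative of ours; the commit instead leaves the old calls on the 9.x branches):

/* Sketch: emulate the PostgreSQL 10 catalog helpers on 9.x. */
#include "postgres.h"
#include "access/heapam.h"
#include "catalog/indexing.h"

#if (PG_VERSION_NUM < 100000)
#define CatalogTupleInsert(relation, tuple) \
    do { \
        simple_heap_insert((relation), (tuple)); \
        CatalogUpdateIndexes((relation), (tuple)); \
    } while (0)

#define CatalogTupleUpdate(relation, tid, tuple) \
    do { \
        simple_heap_update((relation), (tid), (tuple)); \
        CatalogUpdateIndexes((relation), (tuple)); \
    } while (0)
#endif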
@@ -848,8 +847,7 @@ InsertShardPlacementRow(uint64 shardId, uint64 placementId,
     tupleDescriptor = RelationGetDescr(pgDistShardPlacement);
     heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
 
-    simple_heap_insert(pgDistShardPlacement, heapTuple);
-    CatalogUpdateIndexes(pgDistShardPlacement, heapTuple);
+    CatalogTupleInsert(pgDistShardPlacement, heapTuple);
 
     CitusInvalidateRelcacheByShardId(shardId);
 
@@ -904,8 +902,8 @@ InsertIntoPgDistPartition(Oid relationId, char distributionMethod,
     newTuple = heap_form_tuple(RelationGetDescr(pgDistPartition), newValues, newNulls);
 
     /* finally insert tuple, build index entries & register cache invalidation */
-    simple_heap_insert(pgDistPartition, newTuple);
-    CatalogUpdateIndexes(pgDistPartition, newTuple);
+    CatalogTupleInsert(pgDistPartition, newTuple);
     CitusInvalidateRelcacheByRelid(relationId);
 
     RecordDistributedRelationDependencies(relationId, (Node *) distributionColumn);
@@ -946,8 +944,13 @@ RecordDistributedRelationDependencies(Oid distributedRelationId, Node *distribut
     recordDependencyOn(&relationAddr, &citusExtensionAddr, DEPENDENCY_NORMAL);
 
     /* make sure the distribution key column/expression does not just go away */
+#if (PG_VERSION_NUM >= 100000)
+    recordDependencyOnSingleRelExpr(&relationAddr, distributionKey, distributedRelationId,
+                                    DEPENDENCY_NORMAL, DEPENDENCY_NORMAL, false);
+#else
     recordDependencyOnSingleRelExpr(&relationAddr, distributionKey, distributedRelationId,
                                     DEPENDENCY_NORMAL, DEPENDENCY_NORMAL);
+#endif
 }
 
 
@@ -1156,9 +1159,8 @@ UpdateShardPlacementState(uint64 placementId, char shardState)
     replace[Anum_pg_dist_shard_placement_shardstate - 1] = true;
 
     heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace);
-    simple_heap_update(pgDistShardPlacement, &heapTuple->t_self, heapTuple);
 
-    CatalogUpdateIndexes(pgDistShardPlacement, heapTuple);
+    CatalogTupleUpdate(pgDistShardPlacement, &heapTuple->t_self, heapTuple);
 
     shardId = DatumGetInt64(heap_getattr(heapTuple,
                                          Anum_pg_dist_shard_placement_shardid,
@@ -1223,9 +1225,8 @@ UpdateColocationGroupReplicationFactor(uint32 colocationId, int replicationFacto
     replace[Anum_pg_dist_colocation_replicationfactor - 1] = true;
 
     newHeapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace);
-    simple_heap_update(pgDistColocation, &newHeapTuple->t_self, newHeapTuple);
 
-    CatalogUpdateIndexes(pgDistColocation, newHeapTuple);
+    CatalogTupleUpdate(pgDistColocation, &newHeapTuple->t_self, newHeapTuple);
 
     CommandCounterIncrement();
 
@@ -85,12 +85,17 @@ master_modify_multiple_shards(PG_FUNCTION_ARGS)
     List *prunedShardIntervalList = NIL;
     List *taskList = NIL;
     int32 affectedTupleCount = 0;
+#if (PG_VERSION_NUM >= 100000)
+    RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString);
+    queryTreeNode = rawStmt->stmt;
+#else
+    queryTreeNode = ParseTreeNode(queryString);
+#endif
 
     EnsureCoordinator();
     CheckCitusVersion(ERROR);
 
 
-    queryTreeNode = ParseTreeNode(queryString);
     if (IsA(queryTreeNode, DeleteStmt))
     {
         DeleteStmt *deleteStatement = (DeleteStmt *) queryTreeNode;
@@ -136,7 +141,11 @@ master_modify_multiple_shards(PG_FUNCTION_ARGS)
 
     CheckDistributedTable(relationId);
 
+#if (PG_VERSION_NUM >= 100000)
+    queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL);
+#else
     queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0);
+#endif
     modifyQuery = (Query *) linitial(queryTreeList);
 
     if (modifyQuery->commandType != CMD_UTILITY)
@@ -60,6 +60,9 @@
 #include "utils/relcache.h"
 #include "utils/ruleutils.h"
 #include "utils/tqual.h"
+#if (PG_VERSION_NUM >= 100000)
+#include "utils/varlena.h"
+#endif
 
 
 /* Shard related configuration */
@@ -495,7 +498,11 @@ GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults)
 {
     List *tableDDLEventList = NIL;
     char tableType = 0;
+#if (PG_VERSION_NUM >= 100000)
+    List *sequenceIdlist = getOwnedSequences(relationId, InvalidAttrNumber);
+#else
     List *sequenceIdlist = getOwnedSequences(relationId);
+#endif
     ListCell *sequenceIdCell;
     char *tableSchemaDef = NULL;
     char *tableColumnOptionsDef = NULL;
@@ -19,7 +19,11 @@
 #include "distributed/metadata_cache.h"
 #include "distributed/multi_client_executor.h"
 #include "libpq/hba.h"
+#if (PG_VERSION_NUM >= 100000)
+#include "common/ip.h"
+#else
 #include "libpq/ip.h"
+#endif
 #include "libpq/libpq-be.h"
 #include "postmaster/postmaster.h"
 #include "storage/fd.h"
@@ -812,9 +812,8 @@ MarkNodeHasMetadata(char *nodeName, int32 nodePort, bool hasMetadata)
     replace[Anum_pg_dist_node_hasmetadata - 1] = true;
 
     heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace);
-    simple_heap_update(pgDistNode, &heapTuple->t_self, heapTuple);
 
-    CatalogUpdateIndexes(pgDistNode, heapTuple);
+    CatalogTupleUpdate(pgDistNode, &heapTuple->t_self, heapTuple);
 
     CitusInvalidateRelcacheByRelid(DistNodeRelationId());
 
@@ -837,7 +836,11 @@ List *
 SequenceDDLCommandsForTable(Oid relationId)
 {
     List *sequenceDDLList = NIL;
+#if (PG_VERSION_NUM >= 100000)
+    List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber);
+#else
     List *ownedSequences = getOwnedSequences(relationId);
+#endif
     ListCell *listCell;
     char *ownerName = TableOwner(relationId);
 
@@ -921,7 +924,19 @@ EnsureSupportedSequenceColumnType(Oid sequenceOid)
     bool hasMetadataWorkers = HasMetadataWorkers();
 
     /* call sequenceIsOwned in order to get the tableId and columnId */
+#if (PG_VERSION_NUM >= 100000)
+    bool sequenceOwned = sequenceIsOwned(sequenceOid, DEPENDENCY_AUTO, &tableId,
+                                         &columnId);
+    if (!sequenceOwned)
+    {
+        sequenceOwned = sequenceIsOwned(sequenceOid, DEPENDENCY_INTERNAL, &tableId,
+                                        &columnId);
+    }
+
+    Assert(sequenceOwned);
+#else
     sequenceIsOwned(sequenceOid, &tableId, &columnId);
+#endif
 
     shouldSyncMetadata = ShouldSyncTableMetadata(tableId);
 
@@ -72,8 +72,6 @@ typedef struct RemoteExplainPlan
 
 
 /* Explain functions for distributed queries */
-static void CitusExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
-                                 const char *queryString, ParamListInfo params);
 static void ExplainJob(Job *job, ExplainState *es);
 static void ExplainMapMergeJob(MapMergeJob *mapMergeJob, ExplainState *es);
 static void ExplainTaskList(List *taskList, ExplainState *es);
@@ -85,6 +83,15 @@ static void ExplainTaskPlacement(ShardPlacement *taskPlacement, List *explainOut
 static StringInfo BuildRemoteExplainQuery(char *queryString, ExplainState *es);
 
 /* Static Explain functions copied from explain.c */
+#if (PG_VERSION_NUM >= 100000)
+static void ExplainOneQuery(Query *query, int cursorOptions,
+                            IntoClause *into, ExplainState *es,
+                            const char *queryString, ParamListInfo params,
+                            QueryEnvironment *queryEnv);
+#else
+static void ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
+                            const char *queryString, ParamListInfo params);
+#endif
 static void ExplainOpenGroup(const char *objtype, const char *labelname,
                              bool labeled, ExplainState *es);
 static void ExplainCloseGroup(const char *objtype, const char *labelname,
@@ -121,6 +128,42 @@ CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es
 }
 
 
+/*
+ * CoordinatorInsertSelectExplainScan is a custom scan explain callback function
+ * which is used to print explain information of a Citus plan for an INSERT INTO
+ * distributed_table SELECT ... query that is evaluated on the coordinator.
+ */
+void
+CoordinatorInsertSelectExplainScan(CustomScanState *node, List *ancestors,
+                                   struct ExplainState *es)
+{
+    CitusScanState *scanState = (CitusScanState *) node;
+    MultiPlan *multiPlan = scanState->multiPlan;
+    Query *query = multiPlan->insertSelectSubquery;
+    IntoClause *into = NULL;
+    ParamListInfo params = NULL;
+    char *queryString = NULL;
+
+    if (es->analyze)
+    {
+        /* avoiding double execution here is tricky, error out for now */
+        ereport(ERROR, (errmsg("EXPLAIN ANALYZE is currently not supported for INSERT "
+                               "... SELECT commands via the coordinator")));
+    }
+
+    ExplainOpenGroup("Select Query", "Select Query", false, es);
+
+    /* explain the inner SELECT query */
+#if (PG_VERSION_NUM >= 100000)
+    ExplainOneQuery(query, 0, into, es, queryString, params, NULL);
+#else
+    ExplainOneQuery(query, into, es, queryString, params);
+#endif
+
+    ExplainCloseGroup("Select Query", "Select Query", false, es);
+}
+
+
 /*
  * ExplainJob shows the EXPLAIN output for a Job in the physical plan of
  * a distributed query by showing the remote EXPLAIN for the first task,
@@ -539,6 +582,61 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es)
 
 
 /* *INDENT-OFF* */
+/*
+ * ExplainOneQuery -
+ *    print out the execution plan for one Query
+ *
+ * "into" is NULL unless we are explaining the contents of a CreateTableAsStmt.
+ */
+static void
+#if (PG_VERSION_NUM >= 100000)
+ExplainOneQuery(Query *query, int cursorOptions,
+                IntoClause *into, ExplainState *es,
+                const char *queryString, ParamListInfo params,
+                QueryEnvironment *queryEnv)
+#else
+ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
+                const char *queryString, ParamListInfo params)
+#endif
+{
+    /* if an advisor plugin is present, let it manage things */
+    if (ExplainOneQuery_hook)
+#if (PG_VERSION_NUM >= 100000)
+        (*ExplainOneQuery_hook) (query, cursorOptions, into, es,
+                                 queryString, params);
+#else
+        (*ExplainOneQuery_hook) (query, into, es, queryString, params);
+#endif
+    else
+    {
+        PlannedStmt *plan;
+        instr_time planstart,
+                   planduration;
+
+        INSTR_TIME_SET_CURRENT(planstart);
+
+        /* plan the query */
+#if (PG_VERSION_NUM >= 100000)
+        plan = pg_plan_query(query, cursorOptions, params);
+#elif (PG_VERSION_NUM >= 90600)
+        plan = pg_plan_query(query, into ? 0 : CURSOR_OPT_PARALLEL_OK, params);
+#else
+        plan = pg_plan_query(query, 0, params);
+#endif
+
+        INSTR_TIME_SET_CURRENT(planduration);
+        INSTR_TIME_SUBTRACT(planduration, planstart);
+
+        /* run it (if needed) and produce output */
+#if (PG_VERSION_NUM >= 100000)
+        ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
+                       &planduration);
+#else
+        ExplainOnePlan(plan, into, es, queryString, params, &planduration);
+#endif
+    }
+}
+
 /*
  * Open a group of related objects.
  *
@@ -715,73 +813,3 @@ ExplainYAMLLineStarting(ExplainState *es)
         appendStringInfoSpaces(es->str, es->indent * 2);
     }
 }
-
-
-/*
- * CoordinatorInsertSelectExplainScan is a custom scan explain callback function
- * which is used to print explain information of a Citus plan for an INSERT INTO
- * distributed_table SELECT ... query that is evaluated on the coordinator.
- */
-void
-CoordinatorInsertSelectExplainScan(CustomScanState *node, List *ancestors,
-                                   struct ExplainState *es)
-{
-    CitusScanState *scanState = (CitusScanState *) node;
-    MultiPlan *multiPlan = scanState->multiPlan;
-    Query *query = multiPlan->insertSelectSubquery;
-    IntoClause *into = NULL;
-    ParamListInfo params = NULL;
-    char *queryString = NULL;
-
-    if (es->analyze)
-    {
-        /* avoiding double execution here is tricky, error out for now */
-        ereport(ERROR, (errmsg("EXPLAIN ANALYZE is currently not supported for INSERT "
-                               "... SELECT commands via the coordinator")));
-    }
-
-    ExplainOpenGroup("Select Query", "Select Query", false, es);
-
-    /* explain the inner SELECT query */
-    CitusExplainOneQuery(query, into, es, queryString, params);
-
-    ExplainCloseGroup("Select Query", "Select Query", false, es);
-}
-
-
-/*
- * CitusExplainOneQuery is simply a duplicate of ExplainOneQuery in explain.c, which
- * is static.
- */
-static void
-CitusExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
-                     const char *queryString, ParamListInfo params)
-{
-    /* copied from ExplainOneQuery in explain.c */
-    if (ExplainOneQuery_hook)
-    {
-        (*ExplainOneQuery_hook) (query, into, es, queryString, params);
-    }
-    else
-    {
-        PlannedStmt *plan;
-        instr_time planstart,
-                   planduration;
-        int cursorOptions = 0;
-
-        INSTR_TIME_SET_CURRENT(planstart);
-
-#if (PG_VERSION_NUM >= 90600)
-        cursorOptions = into ? 0 : CURSOR_OPT_PARALLEL_OK;
-#endif
-
-        /* plan the query */
-        plan = pg_plan_query(query, cursorOptions, params);
-
-        INSTR_TIME_SET_CURRENT(planduration);
-        INSTR_TIME_SUBTRACT(planduration, planstart);
-
-        /* run it (if needed) and produce output */
-        ExplainOnePlan(plan, into, es, queryString, params, &planduration);
-    }
-}
@@ -1526,7 +1526,7 @@ RightColumn(OpExpr *joinClause)
 Var *
 PartitionColumn(Oid relationId, uint32 rangeTableId)
 {
-    Var *partitionKey = PartitionKey(relationId);
+    Var *partitionKey = DistPartitionKey(relationId);
     Var *partitionColumn = NULL;
 
     /* short circuit for reference tables */
@@ -1544,7 +1544,7 @@ PartitionColumn(Oid relationId, uint32 rangeTableId)
 
 
 /*
- * PartitionKey returns the partition key column for the given relation. Note
+ * DistPartitionKey returns the partition key column for the given relation. Note
  * that in the context of distributed join and query planning, the callers of
  * this function *must* set the partition key column's range table reference
  * (varno) to match the table's location in the query range table list.
@ -1553,7 +1553,7 @@ PartitionColumn(Oid relationId, uint32 rangeTableId)
|
||||||
* returns NULL when called for reference tables.
|
* returns NULL when called for reference tables.
|
||||||
*/
|
*/
|
||||||
Var *
|
Var *
|
||||||
PartitionKey(Oid relationId)
|
DistPartitionKey(Oid relationId)
|
||||||
{
|
{
|
||||||
DistTableCacheEntry *partitionEntry = DistributedTableCacheEntry(relationId);
|
DistTableCacheEntry *partitionEntry = DistributedTableCacheEntry(relationId);
|
||||||
Node *variableNode = NULL;
|
Node *variableNode = NULL;
|
||||||
|
|
|
@ -47,6 +47,9 @@
 #include "utils/builtins.h"
 #include "utils/fmgroids.h"
 #include "utils/lsyscache.h"
+#if (PG_VERSION_NUM >= 100000)
+#include "utils/regproc.h"
+#endif
 #include "utils/rel.h"
 #include "utils/syscache.h"
 #include "utils/tqual.h"

@ -1718,7 +1721,12 @@ MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType,
 	 * will convert the types of the aggregates if necessary.
 	 */
 	operatorNameList = list_make1(makeString(DIVISION_OPER_NAME));
+#if (PG_VERSION_NUM >= 100000)
+	opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, NULL,
+					 -1);
+#else
 	opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, -1);
+#endif

 	return opExpr;
 }

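PostgreSQL 10 added a last_srf argument to make_op(), used to detect set-returning functions among the operator's arguments; passing NULL preserves the old behavior. Rather than repeating the #if at every call site, the difference can be hidden behind a small wrapper. A minimal sketch in C, assuming a project-local compat header; the make_op_compat name is hypothetical and not part of this commit:

/* version_compat.h -- hypothetical helper, not part of this commit */
#include "postgres.h"
#include "parser/parse_oper.h"

#if (PG_VERSION_NUM >= 100000)
/* PG 10 added a last_srf argument; NULL means no set-returning function seen */
#define make_op_compat(pstate, opname, ltree, rtree, location) \
	make_op((pstate), (opname), (ltree), (rtree), NULL, (location))
#else
#define make_op_compat(pstate, opname, ltree, rtree, location) \
	make_op((pstate), (opname), (ltree), (rtree), (location))
#endif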
@ -2845,7 +2853,7 @@ IsPartitionColumn(Expr *columnExpression, Query *query)

 	if (relationId != InvalidOid && column != NULL)
 	{
-		Var *partitionColumn = PartitionKey(relationId);
+		Var *partitionColumn = DistPartitionKey(relationId);

 		/* not all distributed tables have partition column */
 		if (partitionColumn != NULL && column->varattno == partitionColumn->varattno)

@ -3119,7 +3127,7 @@ PartitionColumnOpExpressionList(Query *query)
 		Assert(rangeTableEntry->rtekind == RTE_RELATION);

 		relationId = rangeTableEntry->relid;
-		partitionColumn = PartitionKey(relationId);
+		partitionColumn = DistPartitionKey(relationId);

 		if (partitionColumn != NULL &&
 			candidatePartitionColumn->varattno == partitionColumn->varattno)

@ -131,6 +131,9 @@ static DeferredErrorMessage * InsertPartitionColumnMatchesSelect(Query *query,
 																 Oid *
 																 selectPartitionColumnTableId);
 static DeferredErrorMessage * ErrorIfQueryHasModifyingCTE(Query *queryTree);
+#if (PG_VERSION_NUM >= 100000)
+static List * get_all_actual_clauses(List *restrictinfo_list);
+#endif


 /*
@ -1638,13 +1641,14 @@ MasterIrreducibleExpressionWalker(Node *expression, WalkerState *state)
 	 * should be checked in this function.
 	 *
 	 * Look through contain_mutable_functions_walker or future PG's equivalent for new
-	 * node types before bumping this version number to fix compilation.
+	 * node types before bumping this version number to fix compilation; e.g. for any
+	 * PostgreSQL after 9.5, see check_functions_in_node.
 	 *
 	 * Once you've added them to this check, make sure you also evaluate them in the
 	 * executor!
 	 */
-	StaticAssertStmt(PG_VERSION_NUM < 90700, "When porting to a newer PG this section"
+	StaticAssertStmt(PG_VERSION_NUM < 100100, "When porting to a newer PG this section"
					 " needs to be reviewed.");
 	if (IsA(expression, Aggref))
 	{
 		Aggref *expr = (Aggref *) expression;

@ -1836,12 +1840,20 @@ TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTre
 	List *restrictClauseList = WhereClauseList(joinTree);
 	OpExpr *equalityExpr = MakeOpExpression(column, BTEqualStrategyNumber);
 	Const *rightConst = (Const *) get_rightop((Expr *) equalityExpr);
+	bool predicateIsImplied = false;

 	rightConst->constvalue = newValue->constvalue;
 	rightConst->constisnull = newValue->constisnull;
 	rightConst->constbyval = newValue->constbyval;

-	if (predicate_implied_by(list_make1(equalityExpr), restrictClauseList))
+#if (PG_VERSION_NUM >= 100000)
+	predicateIsImplied = predicate_implied_by(list_make1(equalityExpr),
+											  restrictClauseList, false);
+#else
+	predicateIsImplied = predicate_implied_by(list_make1(equalityExpr),
+											  restrictClauseList);
+#endif
+	if (predicateIsImplied)
 	{
 		/* target entry of the form SET col = <x> WHERE col = <x> AND ... */
 		isColumnValueChanged = false;

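predicate_implied_by() likewise grew a third parameter in PostgreSQL 10: a weak flag that selects between strong and weak implication semantics. Passing false keeps the pre-10 (strong) semantics, which is exactly what the hunk above does. As with make_op, this can be centralized; a sketch under the same hypothetical compat-header assumption:

#include "optimizer/predtest.h"

#if (PG_VERSION_NUM >= 100000)
/* false = strong implication, matching the pre-10 behavior */
#define predicate_implied_by_compat(predicates, clauses) \
	predicate_implied_by((predicates), (clauses), false)
#else
#define predicate_implied_by_compat(predicates, clauses) \
	predicate_implied_by((predicates), (clauses))
#endif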
@ -3036,3 +3048,34 @@ ErrorIfQueryHasModifyingCTE(Query *queryTree)
 	/* everything OK */
 	return NULL;
 }
+
+
+#if (PG_VERSION_NUM >= 100000)
+
+/*
+ * get_all_actual_clauses
+ *
+ * Returns a list containing the bare clauses from 'restrictinfo_list'.
+ *
+ * This loses the distinction between regular and pseudoconstant clauses,
+ * so be careful what you use it for.
+ */
+static List *
+get_all_actual_clauses(List *restrictinfo_list)
+{
+	List *result = NIL;
+	ListCell *l;
+
+	foreach(l, restrictinfo_list)
+	{
+		RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+
+		Assert(IsA(rinfo, RestrictInfo));
+
+		result = lappend(result, rinfo->clause);
+	}
+	return result;
+}
+
+
+#endif

@ -278,7 +278,7 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex,
 		return NULL;
 	}

-	relationPartitionKey = PartitionKey(relationOid);
+	relationPartitionKey = DistPartitionKey(relationOid);

 	translaterVars = targetAppendRelInfo->translated_vars;
 	foreach(translatedVarCell, translaterVars)

@ -429,7 +429,7 @@ EquivalenceListContainsRelationsEquality(List *attributeEquivalenceList,
 			(RelationRestriction *) lfirst(relationRestrictionCell);
 		int rteIdentity = GetRTEIdentity(relationRestriction->rte);

-		if (PartitionKey(relationRestriction->relationId) &&
+		if (DistPartitionKey(relationRestriction->relationId) &&
 			!bms_is_member(rteIdentity, commonRteIdentities))
 		{
 			return false;

@ -1114,7 +1114,7 @@ AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass **
 		return;
 	}

-	relationPartitionKey = PartitionKey(relationId);
+	relationPartitionKey = DistPartitionKey(relationId);
 	if (relationPartitionKey->varattno != varToBeAdded->varattno)
 	{
 		return;

@ -156,7 +156,11 @@ _PG_init(void)
 	planner_hook = multi_planner;

 	/* register utility hook */
+#if (PG_VERSION_NUM >= 100000)
 	ProcessUtility_hook = multi_ProcessUtility;
+#else
+	ProcessUtility_hook = multi_ProcessUtility9x;
+#endif

 	/* register for planner hook */
 	set_rel_pathlist_hook = multi_relation_restriction_hook;

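Two hook targets exist because PostgreSQL 10 changed the ProcessUtility hook signature: the hook now receives a PlannedStmt * (with the utility statement in pstmt->utilityStmt) and a QueryEnvironment *. A 9.x entry point can adapt by wrapping the raw parse tree the way PG 10 would; the sketch below only illustrates that shape and is not necessarily the commit's exact body (example_ProcessUtility and example_ProcessUtility9x are hypothetical names standing in for the real handlers):

#include "postgres.h"
#include "nodes/plannodes.h"
#include "tcop/utility.h"

struct QueryEnvironment; /* does not exist before PG 10; forward-declare */

static void example_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
								   ProcessUtilityContext context, ParamListInfo params,
								   struct QueryEnvironment *queryEnv, DestReceiver *dest,
								   char *completionTag);

#if (PG_VERSION_NUM < 100000)
/* illustrative 9.x shim: wrap the bare parse tree in a CMD_UTILITY PlannedStmt */
static void
example_ProcessUtility9x(Node *parsetree, const char *queryString,
						 ProcessUtilityContext context, ParamListInfo params,
						 DestReceiver *dest, char *completionTag)
{
	PlannedStmt *plannedStmt = makeNode(PlannedStmt);

	plannedStmt->commandType = CMD_UTILITY;
	plannedStmt->utilityStmt = parsetree;

	/* NULL stands in for the QueryEnvironment that 9.x does not have */
	example_ProcessUtility(plannedStmt, queryString, context, params,
						   NULL, dest, completionTag);
}
#endif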
@ -50,9 +50,14 @@ deparse_shard_query_test(PG_FUNCTION_ARGS)
 	{
 		Node *parsetree = (Node *) lfirst(parseTreeCell);
 		ListCell *queryTreeCell = NULL;
+		List *queryTreeList = NIL;

-		List *queryTreeList = pg_analyze_and_rewrite(parsetree, queryStringChar,
-													 NULL, 0);
+#if (PG_VERSION_NUM >= 100000)
+		queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar,
+											   NULL, 0, NULL);
+#else
+		queryTreeList = pg_analyze_and_rewrite(parsetree, queryStringChar, NULL, 0);
+#endif

 		foreach(queryTreeCell, queryTreeList)
 		{

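In PostgreSQL 10 the raw parser wraps each statement in a RawStmt node, and pg_analyze_and_rewrite() now expects a RawStmt * plus a trailing QueryEnvironment *. Call sites that always pass NULL type hints can absorb the difference in one place; a minimal sketch, assuming the hypothetical compat header used above:

#include "postgres.h"
#include "tcop/tcopprot.h"

/* hypothetical wrapper; on PG 10+ the parse tree must be a RawStmt */
static List *
analyze_and_rewrite_compat(Node *parsetree, const char *queryString)
{
#if (PG_VERSION_NUM >= 100000)
	return pg_analyze_and_rewrite((RawStmt *) parsetree, queryString, NULL, 0, NULL);
#else
	return pg_analyze_and_rewrite(parsetree, queryString, NULL, 0);
#endif
}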
@ -100,8 +100,8 @@ LogTransactionRecord(int groupId, char *transactionName)
 	tupleDescriptor = RelationGetDescr(pgDistTransaction);
 	heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);

-	simple_heap_insert(pgDistTransaction, heapTuple);
-	CatalogUpdateIndexes(pgDistTransaction, heapTuple);
+	CatalogTupleInsert(pgDistTransaction, heapTuple);

 	CommandCounterIncrement();

 	/* close relation and invalidate previous cache entry */

@ -364,7 +364,11 @@ citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
 	/*
 	 * And evaluate it.
 	 */
+#if (PG_VERSION_NUM >= 100000)
+	const_val = ExecEvalExprSwitchContext(exprstate, econtext, &const_is_null);
+#else
 	const_val = ExecEvalExprSwitchContext(exprstate, econtext, &const_is_null, NULL);
+#endif

 	/* Get info needed about result datatype */
 	get_typlenbyval(result_type, &resultTypLen, &resultTypByVal);

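PostgreSQL 10's executor rewrite dropped the trailing ExprDoneCond *isDone argument from ExecEvalExprSwitchContext(); set-returning expressions are handled by dedicated executor nodes instead. Call sites that always passed NULL can paper over the difference; a sketch, again under the hypothetical compat-header assumption:

#include "executor/executor.h"

#if (PG_VERSION_NUM < 100000)
/* pre-10 signature has a trailing ExprDoneCond *; NULL = no SRF support */
#define ExecEvalExprSwitchContextCompat(state, econtext, isNull) \
	ExecEvalExprSwitchContext((state), (econtext), (isNull), NULL)
#else
#define ExecEvalExprSwitchContextCompat(state, econtext, isNull) \
	ExecEvalExprSwitchContext((state), (econtext), (isNull))
#endif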
@ -294,6 +294,10 @@ GetRangeTblKind(RangeTblEntry *rte)
 	switch (rte->rtekind)
 	{
 		/* directly rtekind if it's not possibly an extended RTE */
+#if (PG_VERSION_NUM >= 100000)
+		case RTE_TABLEFUNC:
+		case RTE_NAMEDTUPLESTORE:
+#endif
 		case RTE_RELATION:
 		case RTE_SUBQUERY:
 		case RTE_JOIN:

@ -193,10 +193,18 @@ pg_get_sequencedef_string(Oid sequenceRelationId)

 	/* build our DDL command */
 	qualifiedSequenceName = generate_relation_name(sequenceRelationId, NIL);

+#if (PG_VERSION_NUM >= 100000)
+	sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName,
+						   pgSequenceForm->seqincrement, pgSequenceForm->seqmin,
+						   pgSequenceForm->seqmax, pgSequenceForm->seqstart,
+						   pgSequenceForm->seqcycle ? "" : "NO ");
+#else
 	sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName,
						   pgSequenceForm->increment_by, pgSequenceForm->min_value,
						   pgSequenceForm->max_value, pgSequenceForm->start_value,
						   pgSequenceForm->is_cycled ? "" : "NO ");
+#endif

 	return sequenceDef;
 }

@ -210,8 +218,20 @@ Form_pg_sequence
 pg_get_sequencedef(Oid sequenceRelationId)
 {
 	Form_pg_sequence pgSequenceForm = NULL;
-	SysScanDesc scanDescriptor = NULL;
 	HeapTuple heapTuple = NULL;

+#if (PG_VERSION_NUM >= 100000)
+	heapTuple = SearchSysCache1(SEQRELID, sequenceRelationId);
+	if (!HeapTupleIsValid(heapTuple))
+	{
+		elog(ERROR, "cache lookup failed for sequence %u", sequenceRelationId);
+	}
+
+	pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple);
+
+	ReleaseSysCache(heapTuple);
+#else
+	SysScanDesc scanDescriptor = NULL;
 	Relation sequenceRel = NULL;
 	AclResult permissionCheck = ACLCHECK_NO_PRIV;

@ -241,6 +261,7 @@ pg_get_sequencedef(Oid sequenceRelationId)
 	systable_endscan(scanDescriptor);

 	heap_close(sequenceRel, AccessShareLock);
+#endif

 	return pgSequenceForm;
 }

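PostgreSQL 10 moved the static sequence parameters out of each sequence's own relation and into the new pg_sequence catalog, so Form_pg_sequence now exposes seqstart/seqincrement/seqmin/seqmax/seqcycle and is fetched through the syscache rather than by scanning the sequence relation. A field-mapping sketch for code that must read both layouts; the accessor names on the left are invented for illustration:

#if (PG_VERSION_NUM >= 100000)
#include "catalog/pg_sequence.h"
#define SEQ_START(form)     ((form)->seqstart)
#define SEQ_INCREMENT(form) ((form)->seqincrement)
#define SEQ_MIN(form)       ((form)->seqmin)
#define SEQ_MAX(form)       ((form)->seqmax)
#define SEQ_CYCLE(form)     ((form)->seqcycle)
#else
#include "commands/sequence.h"
#define SEQ_START(form)     ((form)->start_value)
#define SEQ_INCREMENT(form) ((form)->increment_by)
#define SEQ_MIN(form)       ((form)->min_value)
#define SEQ_MAX(form)       ((form)->max_value)
#define SEQ_CYCLE(form)     ((form)->is_cycled)
#endif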
@ -20,6 +20,7 @@
 #include "commands/sequence.h"
 #include "distributed/colocation_utils.h"
 #include "distributed/listutils.h"
+#include "distributed/master_metadata_utility.h"
 #include "distributed/master_protocol.h"
 #include "distributed/metadata_cache.h"
 #include "distributed/metadata_sync.h"

@ -126,7 +127,7 @@ MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId)
 	uint32 shardCount = ShardIntervalCount(sourceRelationId);
 	uint32 shardReplicationFactor = TableShardReplicationFactor(sourceRelationId);

-	Var *sourceDistributionColumn = PartitionKey(sourceRelationId);
+	Var *sourceDistributionColumn = DistPartitionKey(sourceRelationId);
 	Oid sourceDistributionColumnType = InvalidOid;

 	/* reference tables has NULL distribution column */

@ -477,8 +478,7 @@ CreateColocationGroup(int shardCount, int replicationFactor, Oid distributionCol
 	tupleDescriptor = RelationGetDescr(pgDistColocation);
 	heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);

-	simple_heap_insert(pgDistColocation, heapTuple);
-	CatalogUpdateIndexes(pgDistColocation, heapTuple);
+	CatalogTupleInsert(pgDistColocation, heapTuple);

 	/* increment the counter so that next command can see the row */
 	CommandCounterIncrement();

@ -567,7 +567,7 @@ CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId)
 	Oid targetDistributionColumnType = InvalidOid;

 	/* reference tables have NULL distribution column */
-	sourceDistributionColumn = PartitionKey(sourceRelationId);
+	sourceDistributionColumn = DistPartitionKey(sourceRelationId);
 	if (sourceDistributionColumn == NULL)
 	{
 		sourceDistributionColumnType = InvalidOid;

@ -578,7 +578,7 @@ CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId)
 	}

 	/* reference tables have NULL distribution column */
-	targetDistributionColumn = PartitionKey(targetRelationId);
+	targetDistributionColumn = DistPartitionKey(targetRelationId);
 	if (targetDistributionColumn == NULL)
 	{
 		targetDistributionColumnType = InvalidOid;

@ -648,9 +648,10 @@ UpdateRelationColocationGroup(Oid distributedRelationId, uint32 colocationId)
 	replace[Anum_pg_dist_partition_colocationid - 1] = true;

 	heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isNull, replace);
-	simple_heap_update(pgDistPartition, &heapTuple->t_self, heapTuple);
-
-	CatalogUpdateIndexes(pgDistPartition, heapTuple);
+	CatalogTupleUpdate(pgDistPartition, &heapTuple->t_self, heapTuple);

 	CitusInvalidateRelcacheByRelid(distributedRelationId);

 	CommandCounterIncrement();

@ -43,7 +43,11 @@ typedef struct MaintenanceDaemonControlData
 	 * data in dbHash.
 	 */
 	int trancheId;
+#if (PG_VERSION_NUM >= 100000)
+	char *lockTrancheName;
+#else
 	LWLockTranche lockTranche;
+#endif
 	LWLock lock;

 	/*

@ -257,7 +261,11 @@ CitusMaintenanceDaemonMain(Datum main_arg)
 		/*
 		 * Wait until timeout, or until somebody wakes us up.
 		 */
+#if (PG_VERSION_NUM >= 100000)
+		rc = WaitLatch(MyLatch, latchFlags, timeout, PG_WAIT_EXTENSION);
+#else
 		rc = WaitLatch(MyLatch, latchFlags, timeout);
+#endif

 		/* emergency bailout if postmaster has died */
 		if (rc & WL_POSTMASTER_DEATH)

@ -343,6 +351,13 @@ MaintenanceDaemonShmemInit(void)
 	 */
 	if (!alreadyInitialized)
 	{
+#if (PG_VERSION_NUM >= 100000)
+		MaintenanceDaemonControl->trancheId = LWLockNewTrancheId();
+		MaintenanceDaemonControl->lockTrancheName = "Citus Maintenance Daemon";
+		LWLockRegisterTranche(MaintenanceDaemonControl->trancheId,
+							  MaintenanceDaemonControl->lockTrancheName);
+#else

 		/* initialize lwlock */
 		LWLockTranche *tranche = &MaintenanceDaemonControl->lockTranche;

@ -355,6 +370,8 @@ MaintenanceDaemonShmemInit(void)
 		tranche->array_stride = sizeof(LWLock);
 		tranche->name = "Citus Maintenance Daemon";
 		LWLockRegisterTranche(MaintenanceDaemonControl->trancheId, tranche);
+#endif

 		LWLockInitialize(&MaintenanceDaemonControl->lock,
						 MaintenanceDaemonControl->trancheId);
 	}

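Two independent PostgreSQL 10 changes meet in this file: WaitLatch() gained a wait_event_info argument that shows up in pg_stat_activity (PG_WAIT_EXTENSION is the generic tag for extension code), and LWLockRegisterTranche() now takes the tranche name directly instead of an LWLockTranche struct. A hedged sketch of the wait-latch side, in the same hypothetical compat-header style:

#include "postgres.h"
#include "storage/latch.h"

#if (PG_VERSION_NUM >= 100000)
#include "pgstat.h"
/* attribute waits to the generic "Extension" wait event */
#define WaitLatchCompat(latch, wakeEvents, timeout) \
	WaitLatch((latch), (wakeEvents), (timeout), PG_WAIT_EXTENSION)
#else
#define WaitLatchCompat(latch, wakeEvents, timeout) \
	WaitLatch((latch), (wakeEvents), (timeout))
#endif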
@ -361,7 +361,7 @@ get_shard_id_for_distribution_column(PG_FUNCTION_ARGS)
 	inputDataType = get_fn_expr_argtype(fcinfo->flinfo, 1);
 	distributionValueString = DatumToString(inputDatum, inputDataType);

-	distributionColumn = PartitionKey(relationId);
+	distributionColumn = DistPartitionKey(relationId);
 	distributionDataType = distributionColumn->vartype;

 	distributionValueDatum = StringToDatum(distributionValueString,

@ -625,9 +625,9 @@ SetNodeState(char *nodeName, int32 nodePort, bool isActive)
 	replace[Anum_pg_dist_node_isactive - 1] = true;

 	heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace);
-	simple_heap_update(pgDistNode, &heapTuple->t_self, heapTuple);
-
-	CatalogUpdateIndexes(pgDistNode, heapTuple);
+	CatalogTupleUpdate(pgDistNode, &heapTuple->t_self, heapTuple);

 	CitusInvalidateRelcacheByRelid(DistNodeRelationId());
 	CommandCounterIncrement();

@ -868,8 +868,7 @@ InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, uint32 groupId, char *
 	tupleDescriptor = RelationGetDescr(pgDistNode);
 	heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);

-	simple_heap_insert(pgDistNode, heapTuple);
-	CatalogUpdateIndexes(pgDistNode, heapTuple);
+	CatalogTupleInsert(pgDistNode, heapTuple);

 	/* close relation and invalidate previous cache entry */
 	heap_close(pgDistNode, AccessExclusiveLock);

(File diff suppressed because it is too large.)

@ -578,6 +578,13 @@ TaskTrackerShmemInit(void)

 	if (!alreadyInitialized)
 	{
+#if (PG_VERSION_NUM >= 100000)
+		WorkerTasksSharedState->taskHashTrancheId = LWLockNewTrancheId();
+		WorkerTasksSharedState->taskHashTrancheName = "Worker Task Hash Tranche";
+		LWLockRegisterTranche(WorkerTasksSharedState->taskHashTrancheId,
+							  WorkerTasksSharedState->taskHashTrancheName);
+#else

 		/* initialize lwlock protecting the task tracker hash table */
 		LWLockTranche *tranche = &WorkerTasksSharedState->taskHashLockTranche;

@ -586,6 +593,8 @@ TaskTrackerShmemInit(void)
 		tranche->array_stride = sizeof(LWLock);
 		tranche->name = "Worker Task Hash Tranche";
 		LWLockRegisterTranche(WorkerTasksSharedState->taskHashTrancheId, tranche);
+#endif

 		LWLockInitialize(&WorkerTasksSharedState->taskHashLock,
						 WorkerTasksSharedState->taskHashTrancheId);
 	}

@ -291,11 +291,17 @@ CreateJobSchema(StringInfo schemaName)

 	createSchemaStmt = makeNode(CreateSchemaStmt);
 	createSchemaStmt->schemaname = schemaName->data;
-	createSchemaStmt->authrole = (Node *) &currentUserRole;
 	createSchemaStmt->schemaElts = NIL;

 	/* actually create schema with the current user as owner */
+#if (PG_VERSION_NUM >= 100000)
+	createSchemaStmt->authrole = &currentUserRole;
+	CreateSchemaCommand(createSchemaStmt, queryString, -1, -1);
+#else
+	createSchemaStmt->authrole = (Node *) &currentUserRole;
 	CreateSchemaCommand(createSchemaStmt, queryString);
+#endif

 	CommandCounterIncrement();

 	/* and reset environment */

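In PostgreSQL 10 the CreateSchemaStmt.authrole field is typed as RoleSpec * (so the Node * cast disappears), and CreateSchemaCommand() takes the statement's location and length within the source string, where -1, -1 means unknown. A sketch of a helper absorbing both differences; CreateSchemaCompat is a hypothetical name, not part of this commit:

#include "postgres.h"
#include "commands/schemacmds.h"
#include "nodes/parsenodes.h"

/* illustration only: the authrole type and the command signature differ by version */
static void
CreateSchemaCompat(CreateSchemaStmt *stmt, RoleSpec *owner, const char *queryString)
{
#if (PG_VERSION_NUM >= 100000)
	stmt->authrole = owner;                          /* RoleSpec * on PG 10 */
	CreateSchemaCommand(stmt, queryString, -1, -1);  /* statement location unknown */
#else
	stmt->authrole = (Node *) owner;                 /* generic Node * before PG 10 */
	CreateSchemaCommand(stmt, queryString);
#endif
}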
@ -33,6 +33,7 @@
 #include "distributed/multi_client_executor.h"
 #include "distributed/multi_logical_optimizer.h"
 #include "distributed/multi_server_executor.h"
+#include "distributed/multi_utility.h"
 #include "distributed/relay_utility.h"
 #include "distributed/remote_commands.h"
 #include "distributed/resource_lock.h"

@ -44,6 +45,10 @@
 #include "tcop/utility.h"
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
+#if (PG_VERSION_NUM >= 100000)
+#include "utils/regproc.h"
+#include "utils/varlena.h"
+#endif


 /* Config variable managed via guc.c */

@ -428,8 +433,8 @@ worker_apply_shard_ddl_command(PG_FUNCTION_ARGS)

 	/* extend names in ddl command and apply extended command */
 	RelayEventExtendNames(ddlCommandNode, schemaName, shardId);
-	ProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_TOPLEVEL,
-				   NULL, None_Receiver, NULL);
+	CitusProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_TOPLEVEL, NULL,
+						None_Receiver, NULL);

 	PG_RETURN_VOID();
 }

@ -460,8 +465,8 @@ worker_apply_inter_shard_ddl_command(PG_FUNCTION_ARGS)
 	RelayEventExtendNamesForInterShardCommands(ddlCommandNode, leftShardId,
											   leftShardSchemaName, rightShardId,
											   rightShardSchemaName);
-	ProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_TOPLEVEL, NULL,
-				   None_Receiver, NULL);
+	CitusProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_TOPLEVEL, NULL,
+						None_Receiver, NULL);

 	PG_RETURN_VOID();
 }

@ -496,8 +501,8 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS)
 	}

 	/* run the CREATE SEQUENCE command */
-	ProcessUtility(commandNode, commandString, PROCESS_UTILITY_TOPLEVEL,
-				   NULL, None_Receiver, NULL);
+	CitusProcessUtility(commandNode, commandString, PROCESS_UTILITY_TOPLEVEL, NULL,
+						None_Receiver, NULL);
 	CommandCounterIncrement();

 	createSequenceStatement = (CreateSeqStmt *) commandNode;

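All of these call sites previously invoked ProcessUtility() directly; because PostgreSQL 10 wants a PlannedStmt * there, the commit funnels them through CitusProcessUtility(), which keeps the old Node * interface. A minimal sketch of what such a version-absorbing dispatcher can look like; the body is illustrative and not necessarily the commit's exact implementation:

#include "postgres.h"
#include "nodes/plannodes.h"
#include "tcop/utility.h"

void
CitusProcessUtility(Node *node, const char *queryString,
					ProcessUtilityContext context, ParamListInfo params,
					DestReceiver *dest, char *completionTag)
{
#if (PG_VERSION_NUM >= 100000)
	/* PG 10: wrap the utility statement in a CMD_UTILITY PlannedStmt */
	PlannedStmt *plannedStmt = makeNode(PlannedStmt);

	plannedStmt->commandType = CMD_UTILITY;
	plannedStmt->utilityStmt = node;

	ProcessUtility(plannedStmt, queryString, context, params, NULL, dest,
				   completionTag);
#else
	ProcessUtility(node, queryString, context, params, dest, completionTag);
#endif
}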
@ -851,8 +856,8 @@ FetchRegularTable(const char *nodeName, uint32 nodePort, const char *tableName)
 		StringInfo ddlCommand = (StringInfo) lfirst(ddlCommandCell);
 		Node *ddlCommandNode = ParseTreeNode(ddlCommand->data);

-		ProcessUtility(ddlCommandNode, ddlCommand->data, PROCESS_UTILITY_TOPLEVEL,
-					   NULL, None_Receiver, NULL);
+		CitusProcessUtility(ddlCommandNode, ddlCommand->data, PROCESS_UTILITY_TOPLEVEL,
+							NULL, None_Receiver, NULL);
 		CommandCounterIncrement();
 	}

@ -870,8 +875,8 @@ FetchRegularTable(const char *nodeName, uint32 nodePort, const char *tableName)
 	queryString = makeStringInfo();
 	appendStringInfo(queryString, COPY_IN_COMMAND, tableName, localFilePath->data);

-	ProcessUtility((Node *) localCopyCommand, queryString->data,
-				   PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
+	CitusProcessUtility((Node *) localCopyCommand, queryString->data,
+						PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);

 	/* finally delete the temporary file we created */
 	DeleteFile(localFilePath->data);

@ -945,8 +950,8 @@ FetchForeignTable(const char *nodeName, uint32 nodePort, const char *tableName)
 		StringInfo ddlCommand = (StringInfo) lfirst(ddlCommandCell);
 		Node *ddlCommandNode = ParseTreeNode(ddlCommand->data);

-		ProcessUtility(ddlCommandNode, ddlCommand->data, PROCESS_UTILITY_TOPLEVEL,
-					   NULL, None_Receiver, NULL);
+		CitusProcessUtility(ddlCommandNode, ddlCommand->data, PROCESS_UTILITY_TOPLEVEL,
+							NULL, None_Receiver, NULL);
 		CommandCounterIncrement();
 	}

@ -1130,6 +1135,22 @@ ExecuteRemoteQuery(const char *nodeName, uint32 nodePort, char *runAsUser,
 */
 Node *
 ParseTreeNode(const char *ddlCommand)
+{
+	Node *parseTreeNode = ParseTreeRawStmt(ddlCommand);
+
+#if (PG_VERSION_NUM >= 100000)
+	parseTreeNode = ((RawStmt *) parseTreeNode)->stmt;
+#endif
+
+	return parseTreeNode;
+}
+
+
+/*
+ * Parses the given DDL command, and returns the tree node for parsed command.
+ */
+Node *
+ParseTreeRawStmt(const char *ddlCommand)
 {
 	Node *parseTreeNode = NULL;
 	List *parseTreeList = NULL;

@ -1237,8 +1258,8 @@ worker_append_table_to_shard(PG_FUNCTION_ARGS)
 	appendStringInfo(queryString, COPY_IN_COMMAND, shardQualifiedName,
					 localFilePath->data);

-	ProcessUtility((Node *) localCopyCommand, queryString->data,
-				   PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
+	CitusProcessUtility((Node *) localCopyCommand, queryString->data,
+						PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);

 	/* finally delete the temporary file we created */
 	DeleteFile(localFilePath->data);

@ -1299,6 +1320,14 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName)
 	Form_pg_sequence sequenceData = pg_get_sequencedef(sequenceId);
 	int64 startValue = 0;
 	int64 maxValue = 0;
+#if (PG_VERSION_NUM >= 100000)
+	int64 sequenceMaxValue = sequenceData->seqmax;
+	int64 sequenceMinValue = sequenceData->seqmin;
+#else
+	int64 sequenceMaxValue = sequenceData->max_value;
+	int64 sequenceMinValue = sequenceData->min_value;
+#endif

 	/* calculate min/max values that the sequence can generate in this worker */
 	startValue = (((int64) GetLocalGroupId()) << 48) + 1;

@ -1309,7 +1338,7 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName)
 	 * their correct values. This happens when the sequence has been created
 	 * during shard, before the current worker having the metadata.
 	 */
-	if (sequenceData->min_value != startValue || sequenceData->max_value != maxValue)
+	if (sequenceMinValue != startValue || sequenceMaxValue != maxValue)
 	{
 		StringInfo startNumericString = makeStringInfo();
 		StringInfo maxNumericString = makeStringInfo();

@ -1337,8 +1366,8 @@ AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName)
 		SetDefElemArg(alterSequenceStatement, "restart", startFloatArg);

 		/* since the command is an AlterSeqStmt, a dummy command string works fine */
-		ProcessUtility((Node *) alterSequenceStatement, dummyString,
-					   PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
+		CitusProcessUtility((Node *) alterSequenceStatement, dummyString,
+							PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
 	}
 }

@ -1368,6 +1397,11 @@ SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg)
 		}
 	}

+#if (PG_VERSION_NUM >= 100000)
+	defElem = makeDefElem((char *) name, arg, -1);
+#else
 	defElem = makeDefElem((char *) name, arg);
+#endif

 	statement->options = lappend(statement->options, defElem);
 }

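makeDefElem() gained a location parameter in PostgreSQL 10, where -1 again means unknown. Because the same two-line guard recurs at every call site (here and in the COPY option below), a tiny compat macro is another option; a sketch, with makeDefElemCompat an invented name:

#include "nodes/makefuncs.h"

#if (PG_VERSION_NUM >= 100000)
#define makeDefElemCompat(name, arg) makeDefElem((name), (arg), -1)
#else
#define makeDefElemCompat(name, arg) makeDefElem((name), (arg))
#endif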
@ -329,7 +329,6 @@ RemoveJobSchema(StringInfo schemaName)
 	if (OidIsValid(schemaId))
 	{
 		ObjectAddress schemaObject = { 0, 0, 0 };
-		bool showNotices = false;

 		bool permissionsOK = pg_namespace_ownercheck(schemaId, GetUserId());
 		if (!permissionsOK)

@ -347,7 +346,16 @@ RemoveJobSchema(StringInfo schemaName)
 		 * can suppress notice messages that are typically displayed during
 		 * cascading deletes.
 		 */
-		deleteWhatDependsOn(&schemaObject, showNotices);
+#if (PG_VERSION_NUM >= 100000)
+		performDeletion(&schemaObject, DROP_CASCADE,
+						PERFORM_DELETION_INTERNAL |
+						PERFORM_DELETION_QUIETLY |
+						PERFORM_DELETION_SKIP_ORIGINAL |
+						PERFORM_DELETION_SKIP_EXTENSIONS);
+#else
+		deleteWhatDependsOn(&schemaObject, false);
+#endif
 		CommandCounterIncrement();

 		/* drop the empty schema */

@ -386,7 +394,12 @@ CreateTaskTable(StringInfo schemaName, StringInfo relationName,

 	createStatement = CreateStatement(relation, columnDefinitionList);

+#if (PG_VERSION_NUM >= 100000)
+	relationObject = DefineRelation(createStatement, RELKIND_RELATION, InvalidOid, NULL,
+									NULL);
+#else
 	relationObject = DefineRelation(createStatement, RELKIND_RELATION, InvalidOid, NULL);
+#endif
 	relationId = relationObject.objectId;

 	Assert(relationId != InvalidOid);

@ -510,11 +523,27 @@ CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName,
 		copyStatement = CopyStatement(relation, fullFilename->data);
 		if (BinaryWorkerCopyFormat)
 		{
+#if (PG_VERSION_NUM >= 100000)
+			DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary"),
+											  -1);
+#else
 			DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary"));
+#endif
 			copyStatement->options = list_make1(copyOption);
 		}

+#if (PG_VERSION_NUM >= 100000)
+		{
+			ParseState *pstate = make_parsestate(NULL);
+			pstate->p_sourcetext = queryString;
+
+			DoCopy(pstate, copyStatement, -1, -1, &copiedRowCount);
+
+			free_parsestate(pstate);
+		}
+#else
 		DoCopy(copyStatement, queryString, &copiedRowCount);
+#endif
 		copiedRowTotal += copiedRowCount;
 		CommandCounterIncrement();
 	}

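PostgreSQL 10's DoCopy() expects a ParseState carrying the source text, plus the statement's location and length, instead of a bare query string. A hedged wrapper in the same spirit as the block above; DoCopyCompat is a hypothetical name:

#include "postgres.h"
#include "commands/copy.h"
#include "parser/parse_node.h"

static uint64
DoCopyCompat(CopyStmt *copyStatement, const char *queryString)
{
	uint64 processedRowCount = 0;

#if (PG_VERSION_NUM >= 100000)
	ParseState *pstate = make_parsestate(NULL);
	pstate->p_sourcetext = queryString;

	/* -1, -1: location and length of the statement within the source unknown */
	DoCopy(pstate, copyStatement, -1, -1, &processedRowCount);

	free_parsestate(pstate);
#else
	DoCopy(copyStatement, queryString, &processedRowCount);
#endif

	return processedRowCount;
}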
@ -16,6 +16,7 @@

 #include "postgres.h"
 #include "funcapi.h"
+#include "pgstat.h"

 #include <arpa/inet.h>
 #include <netinet/in.h>

@ -742,7 +743,12 @@ FileOutputStreamFlush(FileOutputStream file)
 	int written = 0;

 	errno = 0;
+#if (PG_VERSION_NUM >= 100000)
+	written = FileWrite(file.fileDescriptor, fileBuffer->data, fileBuffer->len,
+						PG_WAIT_IO);
+#else
 	written = FileWrite(file.fileDescriptor, fileBuffer->data, fileBuffer->len);
+#endif
 	if (written != fileBuffer->len)
 	{
 		ereport(ERROR, (errcode_for_file_access(),

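Like WaitLatch, FileWrite() (and its siblings in fd.c) now report a wait event; PG_WAIT_IO is the generic I/O tag from pgstat.h, which is why that include was added above. A sketch in the same hypothetical compat-header style:

#include "storage/fd.h"

#if (PG_VERSION_NUM >= 100000)
#include "pgstat.h"
/* attribute the write to the generic "IO" wait event class */
#define FileWriteCompat(file, buffer, amount) \
	FileWrite((file), (buffer), (amount), PG_WAIT_IO)
#else
#define FileWriteCompat(file, buffer, amount) \
	FileWrite((file), (buffer), (amount))
#endif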
@ -14,6 +14,9 @@
 #include "postgres.h" /* IWYU pragma: keep */
 #include "c.h"

+#if (PG_VERSION_NUM >= 100000)
+#include "catalog/pg_sequence.h"
+#endif
 #include "commands/sequence.h"
 #include "lib/stringinfo.h"
 #include "nodes/parsenodes.h"

@ -14,8 +14,10 @@
 #ifndef MASTER_METADATA_UTILITY_H
 #define MASTER_METADATA_UTILITY_H

+#include "access/heapam.h"
 #include "access/htup.h"
 #include "access/tupdesc.h"
+#include "catalog/indexing.h"
 #include "distributed/citus_nodes.h"
 #include "distributed/relay_utility.h"
 #include "utils/acl.h"

@ -29,6 +31,27 @@
 #define PG_RELATION_SIZE_FUNCTION "pg_relation_size(%s)"
 #define PG_TOTAL_RELATION_SIZE_FUNCTION "pg_total_relation_size(%s)"

+#if (PG_VERSION_NUM < 100000)
+static inline void
+CatalogTupleUpdate(Relation heapRel, ItemPointer otid, HeapTuple tup)
+{
+	simple_heap_update(heapRel, otid, tup);
+	CatalogUpdateIndexes(heapRel, tup);
+}
+
+
+static inline Oid
+CatalogTupleInsert(Relation heapRel, HeapTuple tup)
+{
+	Oid oid = simple_heap_insert(heapRel, tup);
+	CatalogUpdateIndexes(heapRel, tup);
+
+	return oid;
+}
+
+
+#endif
+
 /* In-memory representation of a typed tuple in pg_dist_shard. */
 typedef struct ShardInterval
 {

@ -90,7 +90,7 @@ extern OpExpr * DualPartitionJoinClause(List *applicableJoinClauses);
 extern Var * LeftColumn(OpExpr *joinClause);
 extern Var * RightColumn(OpExpr *joinClause);
 extern Var * PartitionColumn(Oid relationId, uint32 rangeTableId);
-extern Var * PartitionKey(Oid relationId);
+extern Var * DistPartitionKey(Oid relationId);
 extern char PartitionMethod(Oid relationId);
 extern char TableReplicationModel(Oid relationId);

@ -55,8 +55,14 @@ typedef enum CitusRTEKind
 	CITUS_RTE_SUBQUERY = RTE_SUBQUERY, /* subquery in FROM */
 	CITUS_RTE_JOIN = RTE_JOIN, /* join */
 	CITUS_RTE_FUNCTION = RTE_FUNCTION, /* function in FROM */
+#if (PG_VERSION_NUM >= 100000)
+	CITUS_RTE_TABLEFUNC = RTE_TABLEFUNC, /* TableFunc(.., column list) */
+#endif
 	CITUS_RTE_VALUES = RTE_VALUES, /* VALUES (<exprlist>), (<exprlist>), ... */
 	CITUS_RTE_CTE = RTE_CTE, /* common table expr (WITH list element) */
+#if (PG_VERSION_NUM >= 100000)
+	CITUS_RTE_NAMEDTUPLESTORE = RTE_NAMEDTUPLESTORE, /* tuplestore, e.g. for triggers */
+#endif
 	CITUS_RTE_SHARD,
 	CITUS_RTE_REMOTE_QUERY
 } CitusRTEKind;

@ -29,9 +29,20 @@ typedef struct DDLJob
 	List *taskList; /* worker DDL tasks to execute */
 } DDLJob;

-extern void multi_ProcessUtility(Node *parsetree, const char *queryString,
+#if (PG_VERSION_NUM < 100000)
+struct QueryEnvironment; /* forward-declare to appease compiler */
+#endif
+
+extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
								 ProcessUtilityContext context, ParamListInfo params,
-								 DestReceiver *dest, char *completionTag);
+								 struct QueryEnvironment *queryEnv, DestReceiver *dest,
+								 char *completionTag);
+extern void multi_ProcessUtility9x(Node *parsetree, const char *queryString,
+								   ProcessUtilityContext context, ParamListInfo params,
+								   DestReceiver *dest, char *completionTag);
+extern void CitusProcessUtility(Node *node, const char *queryString,
+								ProcessUtilityContext context, ParamListInfo params,
+								DestReceiver *dest, char *completionTag);
 extern List * PlanGrantStmt(GrantStmt *grantStmt);
 extern void ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
										 Var *distributionColumn, uint32 colocationId);

@ -99,7 +99,11 @@ typedef struct WorkerTasksSharedStateData

 	/* Lock protecting workerNodesHash */
 	int taskHashTrancheId;
+#if (PG_VERSION_NUM >= 100000)
+	char *taskHashTrancheName;
+#else
 	LWLockTranche taskHashLockTranche;
+#endif
 	LWLock taskHashLock;
 } WorkerTasksSharedStateData;

@ -135,6 +135,7 @@ extern Datum CompareCall2(FmgrInfo *funcInfo, Datum leftArgument, Datum rightArg

 /* Function declaration for parsing tree node */
 extern Node * ParseTreeNode(const char *ddlCommand);
+extern Node * ParseTreeRawStmt(const char *ddlCommand);

 /* Function declarations for applying distributed execution primitives */
 extern Datum worker_fetch_partition_file(PG_FUNCTION_ARGS);

@ -456,17 +456,17 @@ INSERT INTO products VALUES(1,'product_1', 10, 8);
 ERROR: single-shard DML commands must not appear in transaction blocks which contain multi-shard data modifications
 ROLLBACK;
 -- There should be no constraint on master and worker(s)
-\d products
-     Table "public.products"
-      Column      |  Type   | Modifiers
-------------------+---------+-----------
- product_no       | integer |
- name             | text    |
- price            | numeric |
- discounted_price | numeric |
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass;
+ Constraint | Definition
+------------+------------
+(0 rows)

 \c - - - :worker_1_port
-\d products_1450199
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;
+ Constraint | Definition
+------------+------------
+(0 rows)
+
 \c - - - :master_port
 -- Tests to check the effect of rollback
 BEGIN;

@ -478,16 +478,16 @@ ALTER TABLE products ADD CONSTRAINT check_price CHECK(price > discounted_price);
 ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no);
 ROLLBACK;
 -- There should be no constraint on master and worker(s)
-\d products
-     Table "public.products"
-      Column      |  Type   | Modifiers
-------------------+---------+-----------
- product_no       | integer |
- name             | text    |
- price            | numeric |
- discounted_price | numeric |
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass;
+ Constraint | Definition
+------------+------------
+(0 rows)

 \c - - - :worker_1_port
-\d products_1450199
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;
+ Constraint | Definition
+------------+------------
+(0 rows)
+
 \c - - - :master_port
 DROP TABLE products;

@ -617,18 +617,18 @@ ERROR: cannot colocate tables table1_groupe and table_bigint
 DETAIL: Distribution column types don't match for table1_groupe and table_bigint.
 -- check worker table schemas
 \c - - - :worker_1_port
-\d table3_groupE_1300050
-Table "public.table3_groupe_1300050"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table3_groupE_1300050'::regclass;
    Column     |  Type   | Modifiers
 --------------+---------+-----------
  dummy_column | text    |
  id           | integer |
+(2 rows)

-\d schema_collocation.table4_groupE_1300052
-Table "schema_collocation.table4_groupe_1300052"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='schema_collocation.table4_groupE_1300052'::regclass;
  Column |  Type   | Modifiers
 --------+---------+-----------
  id     | integer |
+(1 row)

 \c - - - :master_port
 CREATE TABLE table1_groupF ( id int );

@ -401,80 +401,19 @@ SELECT master_create_worker_shards('check_example', '2', '2');
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
\c - - - :worker_1_port
|
\c - - - :worker_1_port
|
||||||
\d check_example*
|
\d check_example_partition_col_key_365040
|
||||||
Table "public.check_example_365040"
|
|
||||||
Column | Type | Modifiers
|
|
||||||
-----------------+---------+-----------
|
|
||||||
partition_col | integer |
|
|
||||||
other_col | integer |
|
|
||||||
other_other_col | integer |
|
|
||||||
Indexes:
|
|
||||||
"check_example_partition_col_key_365040" UNIQUE CONSTRAINT, btree (partition_col)
|
|
||||||
Check constraints:
|
|
||||||
"check_example_other_col_check" CHECK (other_col >= 100)
|
|
||||||
"check_example_other_other_col_check" CHECK (abs(other_other_col) >= 100)
|
|
||||||
|
|
||||||
Table "public.check_example_365041"
|
|
||||||
Column | Type | Modifiers
|
|
||||||
-----------------+---------+-----------
|
|
||||||
partition_col | integer |
|
|
||||||
other_col | integer |
|
|
||||||
other_other_col | integer |
|
|
||||||
Indexes:
|
|
||||||
"check_example_partition_col_key_365041" UNIQUE CONSTRAINT, btree (partition_col)
|
|
||||||
Check constraints:
|
|
||||||
"check_example_other_col_check" CHECK (other_col >= 100)
|
|
||||||
"check_example_other_other_col_check" CHECK (abs(other_other_col) >= 100)
|
|
||||||
|
|
||||||
Index "public.check_example_partition_col_key_365040"
|
Index "public.check_example_partition_col_key_365040"
|
||||||
Column | Type | Definition
|
Column | Type | Definition
|
||||||
---------------+---------+---------------
|
---------------+---------+---------------
|
||||||
partition_col | integer | partition_col
|
partition_col | integer | partition_col
|
||||||
unique, btree, for table "public.check_example_365040"
|
unique, btree, for table "public.check_example_365040"
|
||||||
|
|
||||||
Index "public.check_example_partition_col_key_365041"
|
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365040'::regclass;
|
||||||
Column | Type | Definition
|
Constraint | Definition
|
||||||
---------------+---------+---------------
|
-------------------------------------+-------------------------------------
|
||||||
partition_col | integer | partition_col
|
check_example_other_col_check | CHECK (other_col >= 100)
|
||||||
unique, btree, for table "public.check_example_365041"
|
check_example_other_other_col_check | CHECK (abs(other_other_col) >= 100)
|
||||||
|
(2 rows)
|
||||||
\c - - - :worker_2_port
|
|
||||||
\d check_example*
|
|
||||||
Table "public.check_example_365040"
|
|
||||||
Column | Type | Modifiers
|
|
||||||
-----------------+---------+-----------
|
|
||||||
partition_col | integer |
|
|
||||||
other_col | integer |
|
|
||||||
other_other_col | integer |
|
|
||||||
Indexes:
|
|
||||||
"check_example_partition_col_key_365040" UNIQUE CONSTRAINT, btree (partition_col)
|
|
||||||
Check constraints:
|
|
||||||
"check_example_other_col_check" CHECK (other_col >= 100)
|
|
||||||
"check_example_other_other_col_check" CHECK (abs(other_other_col) >= 100)
|
|
||||||
|
|
||||||
Table "public.check_example_365041"
|
|
||||||
Column | Type | Modifiers
|
|
||||||
-----------------+---------+-----------
|
|
||||||
partition_col | integer |
|
|
||||||
other_col | integer |
|
|
||||||
other_other_col | integer |
|
|
||||||
Indexes:
|
|
||||||
"check_example_partition_col_key_365041" UNIQUE CONSTRAINT, btree (partition_col)
|
|
||||||
Check constraints:
|
|
||||||
"check_example_other_col_check" CHECK (other_col >= 100)
|
|
||||||
"check_example_other_other_col_check" CHECK (abs(other_other_col) >= 100)
|
|
||||||
|
|
||||||
Index "public.check_example_partition_col_key_365040"
|
|
||||||
Column | Type | Definition
|
|
||||||
---------------+---------+---------------
|
|
||||||
partition_col | integer | partition_col
|
|
||||||
unique, btree, for table "public.check_example_365040"
|
|
||||||
|
|
||||||
Index "public.check_example_partition_col_key_365041"
|
|
||||||
Column | Type | Definition
|
|
||||||
---------------+---------+---------------
|
|
||||||
partition_col | integer | partition_col
|
|
||||||
unique, btree, for table "public.check_example_365041"
|
|
||||||
|
|
||||||
\c - - - :master_port
|
\c - - - :master_port
|
||||||
-- drop unnecessary tables
|
-- drop unnecessary tables
|
||||||
|
@@ -501,15 +440,11 @@ SELECT create_distributed_table('raw_table_2', 'user_id');
 (1 row)

 -- see that the constraint exists
-\d raw_table_2
-Table "public.raw_table_2"
-Column | Type | Modifiers
----------+---------+-----------
-user_id | integer |
-Indexes:
-"raw_table_2_user_id_key" UNIQUE CONSTRAINT, btree (user_id)
-Foreign-key constraints:
-"raw_table_2_user_id_fkey" FOREIGN KEY (user_id) REFERENCES raw_table_1(user_id)
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass;
+Constraint | Definition
+--------------------------+-------------------------------------------------------
+raw_table_2_user_id_fkey | FOREIGN KEY (user_id) REFERENCES raw_table_1(user_id)
+(1 row)

 -- should be prevented by the foreign key
 DROP TABLE raw_table_1;
@@ -520,13 +455,10 @@ HINT: Use DROP ... CASCADE to drop the dependent objects too.
 DROP TABLE raw_table_1 CASCADE;
 NOTICE: drop cascades to constraint raw_table_2_user_id_fkey on table raw_table_2
 -- see that the constraint also dropped
-\d raw_table_2
-Table "public.raw_table_2"
-Column | Type | Modifiers
----------+---------+-----------
-user_id | integer |
-Indexes:
-"raw_table_2_user_id_key" UNIQUE CONSTRAINT, btree (user_id)
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass;
+Constraint | Definition
+------------+------------
+(0 rows)

 -- drop the table as well
 DROP TABLE raw_table_2;
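The table_fkeys relation queried in the hunk above is one of the SQL helpers this commit introduces so the tests keep printing a pre-10, \d-style constraint listing. Its real definition lives in the regression harness; the sketch below is only an assumed approximation built on pg_constraint, with the quoted column aliases taken from the queries above and everything else hypothetical:

-- Assumed sketch, not the harness's actual view definition.
CREATE VIEW table_fkeys AS
SELECT con.conrelid AS relid,
       con.conname AS "Constraint",
       pg_get_constraintdef(con.oid) AS "Definition"
FROM pg_constraint con
WHERE con.contype = 'f';

A table_checks analogue, as used earlier for the check_example tables, would filter on contype = 'c' instead.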
@@ -0,0 +1,28 @@
+--
+-- MULTI_CREATE_TABLE_NEW_FEATURES
+--
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+major_version
+---------------
+10
+(1 row)
+
+-- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10
+-- is forbidden in distributed tables.
+CREATE TABLE table_identity_col (
+id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
+payload text );
+SELECT master_create_distributed_table('table_identity_col', 'id', 'append');
+ERROR: cannot distribute relation: table_identity_col
+DETAIL: Distributed relations must not use GENERATED ... AS IDENTITY.
+SELECT create_distributed_table('table_identity_col', 'id');
+ERROR: cannot distribute relation: table_identity_col
+DETAIL: Distributed relations must not use GENERATED ... AS IDENTITY.
+SELECT create_distributed_table('table_identity_col', 'text');
+ERROR: cannot distribute relation: table_identity_col
+DETAIL: Distributed relations must not use GENERATED ... AS IDENTITY.
+SELECT create_reference_table('table_identity_col');
+ERROR: cannot distribute relation: table_identity_col
+DETAIL: Distributed relations must not use GENERATED ... AS IDENTITY.
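The SHOW server_version \gset idiom above captures the server version into a psql variable so the expected output can pin the major version. When psql variables are unavailable, the standard server_version_num setting gives an equivalent, purely SQL-level test (the alias is illustrative):

-- server_version_num is 100000 or above on PostgreSQL 10 and later.
SELECT current_setting('server_version_num')::int >= 100000 AS is_pg10_or_later;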
@@ -0,0 +1,35 @@
+--
+-- MULTI_CREATE_TABLE_NEW_FEATURES
+--
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+major_version
+---------------
+9
+(1 row)
+
+-- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10
+-- is forbidden in distributed tables.
+CREATE TABLE table_identity_col (
+id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
+payload text );
+ERROR: syntax error at or near "GENERATED"
+LINE 2: id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
+^
+SELECT master_create_distributed_table('table_identity_col', 'id', 'append');
+ERROR: relation "table_identity_col" does not exist
+LINE 1: SELECT master_create_distributed_table('table_identity_col',...
+^
+SELECT create_distributed_table('table_identity_col', 'id');
+ERROR: relation "table_identity_col" does not exist
+LINE 1: SELECT create_distributed_table('table_identity_col', 'id');
+^
+SELECT create_distributed_table('table_identity_col', 'text');
+ERROR: relation "table_identity_col" does not exist
+LINE 1: SELECT create_distributed_table('table_identity_col', 'text'...
+^
+SELECT create_reference_table('table_identity_col');
+ERROR: relation "table_identity_col" does not exist
+LINE 1: SELECT create_reference_table('table_identity_col');
+^
|
||||||
SELECT substring(version(), '\d+(?:\.\d+)?') AS major_version;
|
SELECT substring(version(), '\d+(?:\.\d+)?') AS major_version;
|
||||||
major_version
|
major_version
|
||||||
---------------
|
---------------
|
||||||
9.6
|
10
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
\a\t
|
\a\t
|
||||||
|
@ -903,6 +903,8 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
|
||||||
SET parallel_setup_cost=0;
|
SET parallel_setup_cost=0;
|
||||||
SET parallel_tuple_cost=0;
|
SET parallel_tuple_cost=0;
|
||||||
SET min_parallel_relation_size=0;
|
SET min_parallel_relation_size=0;
|
||||||
|
ERROR: unrecognized configuration parameter "min_parallel_relation_size"
|
||||||
|
SET min_parallel_table_scan_size=0;
|
||||||
SET max_parallel_workers_per_gather=4;
|
SET max_parallel_workers_per_gather=4;
|
||||||
-- ensure local plans display correctly
|
-- ensure local plans display correctly
|
||||||
CREATE TABLE lineitem_clone (LIKE lineitem);
|
CREATE TABLE lineitem_clone (LIKE lineitem);
|
||||||
|
|
|
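The second hunk above tracks a GUC rename: 9.6's min_parallel_relation_size became min_parallel_table_scan_size in PostgreSQL 10, so the updated expected output sets both names and expects exactly one to be unrecognized. Outside a regression test one could branch on the server version instead; a sketch assuming a 9.6-or-newer server (this DO block is not what the test does):

DO $$
BEGIN
    -- PostgreSQL 10 renamed min_parallel_relation_size.
    IF current_setting('server_version_num')::int >= 100000 THEN
        PERFORM set_config('min_parallel_table_scan_size', '0', false);
    ELSE
        PERFORM set_config('min_parallel_relation_size', '0', false);
    END IF;
END;
$$;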
@@ -6,7 +6,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000;
 SELECT substring(version(), '\d+(?:\.\d+)?') AS major_version;
 major_version
 ---------------
- 9.5
+ 9.6
 (1 row)

 \a\t
@@ -42,7 +42,7 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
 SELECT l_quantity, count(*) count_quantity FROM lineitem
 GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
 Sort
-Sort Key: COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity
+Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity
 -> HashAggregate
 Group Key: remote_scan.l_quantity
 -> Custom Scan (Citus Real-Time)
@@ -61,18 +61,22 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
 {
 "Plan": {
 "Node Type": "Sort",
-"Sort Key": ["COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "remote_scan.l_quantity"],
+"Parallel Aware": false,
+"Sort Key": ["COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "remote_scan.l_quantity"],
 "Plans": [
 {
 "Node Type": "Aggregate",
 "Strategy": "Hashed",
+"Partial Mode": "Simple",
 "Parent Relationship": "Outer",
+"Parallel Aware": false,
 "Group Key": ["remote_scan.l_quantity"],
 "Plans": [
 {
 "Node Type": "Custom Scan",
 "Parent Relationship": "Outer",
 "Custom Plan Provider": "Citus Real-Time",
+"Parallel Aware": false,
 "Distributed Query": {
 "Job": {
 "Task Count": 8,
@ -86,11 +90,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
|
||||||
"Plan": {
|
"Plan": {
|
||||||
"Node Type": "Aggregate",
|
"Node Type": "Aggregate",
|
||||||
"Strategy": "Hashed",
|
"Strategy": "Hashed",
|
||||||
|
"Partial Mode": "Simple",
|
||||||
|
"Parallel Aware": false,
|
||||||
"Group Key": ["l_quantity"],
|
"Group Key": ["l_quantity"],
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "Seq Scan",
|
"Node Type": "Seq Scan",
|
||||||
"Parent Relationship": "Outer",
|
"Parent Relationship": "Outer",
|
||||||
|
"Parallel Aware": false,
|
||||||
"Relation Name": "lineitem_290001",
|
"Relation Name": "lineitem_290001",
|
||||||
"Alias": "lineitem"
|
"Alias": "lineitem"
|
||||||
}
|
}
|
||||||
|
@ -124,15 +131,18 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
|
||||||
<Query>
|
<Query>
|
||||||
<Plan>
|
<Plan>
|
||||||
<Node-Type>Sort</Node-Type>
|
<Node-Type>Sort</Node-Type>
|
||||||
|
<Parallel-Aware>false</Parallel-Aware>
|
||||||
<Sort-Key>
|
<Sort-Key>
|
||||||
<Item>COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)</Item>
|
<Item>COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)</Item>
|
||||||
<Item>remote_scan.l_quantity</Item>
|
<Item>remote_scan.l_quantity</Item>
|
||||||
</Sort-Key>
|
</Sort-Key>
|
||||||
<Plans>
|
<Plans>
|
||||||
<Plan>
|
<Plan>
|
||||||
<Node-Type>Aggregate</Node-Type>
|
<Node-Type>Aggregate</Node-Type>
|
||||||
<Strategy>Hashed</Strategy>
|
<Strategy>Hashed</Strategy>
|
||||||
|
<Partial-Mode>Simple</Partial-Mode>
|
||||||
<Parent-Relationship>Outer</Parent-Relationship>
|
<Parent-Relationship>Outer</Parent-Relationship>
|
||||||
|
<Parallel-Aware>false</Parallel-Aware>
|
||||||
<Group-Key>
|
<Group-Key>
|
||||||
<Item>remote_scan.l_quantity</Item>
|
<Item>remote_scan.l_quantity</Item>
|
||||||
</Group-Key>
|
</Group-Key>
|
||||||
|
@ -141,6 +151,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
|
||||||
<Node-Type>Custom Scan</Node-Type>
|
<Node-Type>Custom Scan</Node-Type>
|
||||||
<Parent-Relationship>Outer</Parent-Relationship>
|
<Parent-Relationship>Outer</Parent-Relationship>
|
||||||
<Custom-Plan-Provider>Citus Real-Time</Custom-Plan-Provider>
|
<Custom-Plan-Provider>Citus Real-Time</Custom-Plan-Provider>
|
||||||
|
<Parallel-Aware>false</Parallel-Aware>
|
||||||
<Distributed-Query>
|
<Distributed-Query>
|
||||||
<Job>
|
<Job>
|
||||||
<Task-Count>8</Task-Count>
|
<Task-Count>8</Task-Count>
|
||||||
|
@ -154,6 +165,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
|
||||||
<Plan>
|
<Plan>
|
||||||
<Node-Type>Aggregate</Node-Type>
|
<Node-Type>Aggregate</Node-Type>
|
||||||
<Strategy>Hashed</Strategy>
|
<Strategy>Hashed</Strategy>
|
||||||
|
<Partial-Mode>Simple</Partial-Mode>
|
||||||
|
<Parallel-Aware>false</Parallel-Aware>
|
||||||
<Group-Key>
|
<Group-Key>
|
||||||
<Item>l_quantity</Item>
|
<Item>l_quantity</Item>
|
||||||
</Group-Key>
|
</Group-Key>
|
||||||
|
@ -161,6 +174,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
|
||||||
<Plan>
|
<Plan>
|
||||||
<Node-Type>Seq Scan</Node-Type>
|
<Node-Type>Seq Scan</Node-Type>
|
||||||
<Parent-Relationship>Outer</Parent-Relationship>
|
<Parent-Relationship>Outer</Parent-Relationship>
|
||||||
|
<Parallel-Aware>false</Parallel-Aware>
|
||||||
<Relation-Name>lineitem_290001</Relation-Name>
|
<Relation-Name>lineitem_290001</Relation-Name>
|
||||||
<Alias>lineitem</Alias>
|
<Alias>lineitem</Alias>
|
||||||
</Plan>
|
</Plan>
|
||||||
|
@ -191,19 +205,23 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
|
||||||
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
||||||
- Plan:
|
- Plan:
|
||||||
Node Type: "Sort"
|
Node Type: "Sort"
|
||||||
|
Parallel Aware: false
|
||||||
Sort Key:
|
Sort Key:
|
||||||
- "COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)"
|
- "COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)"
|
||||||
- "remote_scan.l_quantity"
|
- "remote_scan.l_quantity"
|
||||||
Plans:
|
Plans:
|
||||||
- Node Type: "Aggregate"
|
- Node Type: "Aggregate"
|
||||||
Strategy: "Hashed"
|
Strategy: "Hashed"
|
||||||
|
Partial Mode: "Simple"
|
||||||
Parent Relationship: "Outer"
|
Parent Relationship: "Outer"
|
||||||
|
Parallel Aware: false
|
||||||
Group Key:
|
Group Key:
|
||||||
- "remote_scan.l_quantity"
|
- "remote_scan.l_quantity"
|
||||||
Plans:
|
Plans:
|
||||||
- Node Type: "Custom Scan"
|
- Node Type: "Custom Scan"
|
||||||
Parent Relationship: "Outer"
|
Parent Relationship: "Outer"
|
||||||
Custom Plan Provider: "Citus Real-Time"
|
Custom Plan Provider: "Citus Real-Time"
|
||||||
|
Parallel Aware: false
|
||||||
Distributed Query:
|
Distributed Query:
|
||||||
Job:
|
Job:
|
||||||
Task Count: 8
|
Task Count: 8
|
||||||
|
@ -214,11 +232,14 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
|
||||||
- Plan:
|
- Plan:
|
||||||
Node Type: "Aggregate"
|
Node Type: "Aggregate"
|
||||||
Strategy: "Hashed"
|
Strategy: "Hashed"
|
||||||
|
Partial Mode: "Simple"
|
||||||
|
Parallel Aware: false
|
||||||
Group Key:
|
Group Key:
|
||||||
- "l_quantity"
|
- "l_quantity"
|
||||||
Plans:
|
Plans:
|
||||||
- Node Type: "Seq Scan"
|
- Node Type: "Seq Scan"
|
||||||
Parent Relationship: "Outer"
|
Parent Relationship: "Outer"
|
||||||
|
Parallel Aware: false
|
||||||
Relation Name: "lineitem_290001"
|
Relation Name: "lineitem_290001"
|
||||||
Alias: "lineitem"
|
Alias: "lineitem"
|
||||||
|
|
||||||
|
@ -227,7 +248,7 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
|
||||||
SELECT l_quantity, count(*) count_quantity FROM lineitem
|
SELECT l_quantity, count(*) count_quantity FROM lineitem
|
||||||
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
|
||||||
Sort
|
Sort
|
||||||
Sort Key: COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity
|
Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity
|
||||||
-> HashAggregate
|
-> HashAggregate
|
||||||
Group Key: remote_scan.l_quantity
|
Group Key: remote_scan.l_quantity
|
||||||
-> Custom Scan (Citus Real-Time)
|
-> Custom Scan (Citus Real-Time)
|
||||||
|
@ -242,7 +263,7 @@ Sort
|
||||||
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
|
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
|
||||||
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
|
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
|
||||||
Aggregate
|
Aggregate
|
||||||
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / sum(remote_scan."?column?_2")))
|
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
|
||||||
-> Custom Scan (Citus Real-Time)
|
-> Custom Scan (Citus Real-Time)
|
||||||
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
|
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
|
||||||
Task Count: 8
|
Task Count: 8
|
||||||
|
@ -344,7 +365,7 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
|
||||||
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
|
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
|
||||||
HAVING sum(l_quantity) > 100;
|
HAVING sum(l_quantity) > 100;
|
||||||
Aggregate
|
Aggregate
|
||||||
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / sum(remote_scan."?column?_2")))
|
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
|
||||||
Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
|
Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
|
||||||
-> Custom Scan (Citus Real-Time)
|
-> Custom Scan (Citus Real-Time)
|
||||||
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
|
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
|
||||||
|
@ -410,11 +431,15 @@ Aggregate
|
||||||
Node: host=localhost port=57637 dbname=regression
|
Node: host=localhost port=57637 dbname=regression
|
||||||
-> Aggregate
|
-> Aggregate
|
||||||
-> GroupAggregate
|
-> GroupAggregate
|
||||||
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
|
Group Key: (((NULL::user_composite_type)).tenant_id), (((NULL::user_composite_type)).user_id)
|
||||||
-> Sort
|
-> Sort
|
||||||
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
|
Sort Key: (((NULL::user_composite_type)).tenant_id), (((NULL::user_composite_type)).user_id)
|
||||||
-> Result
|
-> Nested Loop
|
||||||
One-Time Filter: false
|
Join Filter: ((NULL::user_composite_type) = events.composite_id)
|
||||||
|
-> Result
|
||||||
|
One-Time Filter: false
|
||||||
|
-> Seq Scan on events_1400027 events
|
||||||
|
Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[]))
|
||||||
-- Union and left join subquery pushdown
|
-- Union and left join subquery pushdown
|
||||||
EXPLAIN (COSTS OFF)
|
EXPLAIN (COSTS OFF)
|
||||||
SELECT
|
SELECT
|
||||||
|
@ -485,29 +510,40 @@ HashAggregate
|
||||||
Tasks Shown: One of 4
|
Tasks Shown: One of 4
|
||||||
-> Task
|
-> Task
|
||||||
Node: host=localhost port=57637 dbname=regression
|
Node: host=localhost port=57637 dbname=regression
|
||||||
-> HashAggregate
|
-> GroupAggregate
|
||||||
Group Key: COALESCE(subquery_2.hasdone, 'Has not done paying'::text)
|
Group Key: subquery_top.hasdone
|
||||||
-> GroupAggregate
|
-> Sort
|
||||||
Group Key: ((composite_id).tenant_id), ((composite_id).user_id), subquery_2.hasdone
|
Sort Key: subquery_top.hasdone
|
||||||
-> Sort
|
-> Subquery Scan on subquery_top
|
||||||
Sort Key: ((composite_id).tenant_id), ((composite_id).user_id), subquery_2.hasdone
|
-> GroupAggregate
|
||||||
-> Hash Left Join
|
Group Key: (((NULL::user_composite_type)).tenant_id), (((NULL::user_composite_type)).user_id), subquery_2.hasdone
|
||||||
Hash Cond: (composite_id = subquery_2.composite_id)
|
-> Sort
|
||||||
-> Unique
|
Sort Key: (((NULL::user_composite_type)).tenant_id), (((NULL::user_composite_type)).user_id), subquery_2.hasdone
|
||||||
-> Sort
|
-> Hash Left Join
|
||||||
Sort Key: ((composite_id).tenant_id), ((composite_id).user_id), composite_id, ('action=>1'::text), event_time
|
Hash Cond: ((NULL::user_composite_type) = subquery_2.composite_id)
|
||||||
-> Append
|
|
||||||
-> Result
|
|
||||||
One-Time Filter: false
|
|
||||||
-> Result
|
|
||||||
One-Time Filter: false
|
|
||||||
-> Hash
|
|
||||||
-> Subquery Scan on subquery_2
|
|
||||||
-> Unique
|
-> Unique
|
||||||
-> Sort
|
-> Sort
|
||||||
Sort Key: ((events.composite_id).tenant_id), ((events.composite_id).user_id)
|
Sort Key: (((NULL::user_composite_type)).tenant_id), (((NULL::user_composite_type)).user_id), (NULL::user_composite_type), ('action=>1'::text), events.event_time
|
||||||
-> Seq Scan on events_1400027 events
|
-> Append
|
||||||
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
|
-> Nested Loop
|
||||||
|
Join Filter: ((NULL::user_composite_type) = events.composite_id)
|
||||||
|
-> Result
|
||||||
|
One-Time Filter: false
|
||||||
|
-> Seq Scan on events_1400027 events
|
||||||
|
Filter: ((event_type)::text = 'click'::text)
|
||||||
|
-> Nested Loop
|
||||||
|
Join Filter: ((NULL::user_composite_type) = events_1.composite_id)
|
||||||
|
-> Result
|
||||||
|
One-Time Filter: false
|
||||||
|
-> Seq Scan on events_1400027 events_1
|
||||||
|
Filter: ((event_type)::text = 'submit'::text)
|
||||||
|
-> Hash
|
||||||
|
-> Subquery Scan on subquery_2
|
||||||
|
-> Unique
|
||||||
|
-> Sort
|
||||||
|
Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id)
|
||||||
|
-> Seq Scan on events_1400027 events_2
|
||||||
|
Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
|
||||||
-- Union, left join and having subquery pushdown
|
-- Union, left join and having subquery pushdown
|
||||||
EXPLAIN (COSTS OFF)
|
EXPLAIN (COSTS OFF)
|
||||||
SELECT
|
SELECT
|
||||||
|
@ -643,22 +679,23 @@ Limit
|
||||||
Node: host=localhost port=57637 dbname=regression
|
Node: host=localhost port=57637 dbname=regression
|
||||||
-> Limit
|
-> Limit
|
||||||
-> Sort
|
-> Sort
|
||||||
Sort Key: (max(lastseen)) DESC
|
Sort Key: (max(users.lastseen)) DESC
|
||||||
-> GroupAggregate
|
-> GroupAggregate
|
||||||
Group Key: ((composite_id).tenant_id), ((composite_id).user_id)
|
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
|
||||||
-> Sort
|
-> Sort
|
||||||
Sort Key: ((composite_id).tenant_id), ((composite_id).user_id)
|
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
|
||||||
-> Nested Loop Left Join
|
-> Nested Loop Left Join
|
||||||
-> Limit
|
-> Limit
|
||||||
-> Sort
|
-> Sort
|
||||||
Sort Key: lastseen DESC
|
Sort Key: users.lastseen DESC
|
||||||
-> Result
|
-> Subquery Scan on users
|
||||||
One-Time Filter: false
|
-> Result
|
||||||
|
One-Time Filter: false
|
||||||
-> Limit
|
-> Limit
|
||||||
-> Sort
|
-> Sort
|
||||||
Sort Key: events.event_time DESC
|
Sort Key: events.event_time DESC
|
||||||
-> Seq Scan on events_1400027 events
|
-> Seq Scan on events_1400027 events
|
||||||
Filter: (composite_id = composite_id)
|
Filter: (composite_id = users.composite_id)
|
||||||
-- Test all tasks output
|
-- Test all tasks output
|
||||||
SET citus.explain_all_tasks TO on;
|
SET citus.explain_all_tasks TO on;
|
||||||
EXPLAIN (COSTS FALSE)
|
EXPLAIN (COSTS FALSE)
|
||||||
|
@ -736,11 +773,14 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
|
||||||
"Plan": {
|
"Plan": {
|
||||||
"Node Type": "Aggregate",
|
"Node Type": "Aggregate",
|
||||||
"Strategy": "Plain",
|
"Strategy": "Plain",
|
||||||
|
"Partial Mode": "Simple",
|
||||||
|
"Parallel Aware": false,
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "Custom Scan",
|
"Node Type": "Custom Scan",
|
||||||
"Parent Relationship": "Outer",
|
"Parent Relationship": "Outer",
|
||||||
"Custom Plan Provider": "Citus Task-Tracker",
|
"Custom Plan Provider": "Citus Task-Tracker",
|
||||||
|
"Parallel Aware": false,
|
||||||
"Distributed Query": {
|
"Distributed Query": {
|
||||||
"Job": {
|
"Job": {
|
||||||
"Task Count": 1,
|
"Task Count": 1,
|
||||||
|
@ -782,11 +822,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
|
||||||
<Plan>
|
<Plan>
|
||||||
<Node-Type>Aggregate</Node-Type>
|
<Node-Type>Aggregate</Node-Type>
|
||||||
<Strategy>Plain</Strategy>
|
<Strategy>Plain</Strategy>
|
||||||
|
<Partial-Mode>Simple</Partial-Mode>
|
||||||
|
<Parallel-Aware>false</Parallel-Aware>
|
||||||
<Plans>
|
<Plans>
|
||||||
<Plan>
|
<Plan>
|
||||||
<Node-Type>Custom Scan</Node-Type>
|
<Node-Type>Custom Scan</Node-Type>
|
||||||
<Parent-Relationship>Outer</Parent-Relationship>
|
<Parent-Relationship>Outer</Parent-Relationship>
|
||||||
<Custom-Plan-Provider>Citus Task-Tracker</Custom-Plan-Provider>
|
<Custom-Plan-Provider>Citus Task-Tracker</Custom-Plan-Provider>
|
||||||
|
<Parallel-Aware>false</Parallel-Aware>
|
||||||
<Distributed-Query>
|
<Distributed-Query>
|
||||||
<Job>
|
<Job>
|
||||||
<Task-Count>1</Task-Count>
|
<Task-Count>1</Task-Count>
|
||||||
|
@ -839,10 +882,13 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
|
||||||
- Plan:
|
- Plan:
|
||||||
Node Type: "Aggregate"
|
Node Type: "Aggregate"
|
||||||
Strategy: "Plain"
|
Strategy: "Plain"
|
||||||
|
Partial Mode: "Simple"
|
||||||
|
Parallel Aware: false
|
||||||
Plans:
|
Plans:
|
||||||
- Node Type: "Custom Scan"
|
- Node Type: "Custom Scan"
|
||||||
Parent Relationship: "Outer"
|
Parent Relationship: "Outer"
|
||||||
Custom Plan Provider: "Citus Task-Tracker"
|
Custom Plan Provider: "Citus Task-Tracker"
|
||||||
|
Parallel Aware: false
|
||||||
Distributed Query:
|
Distributed Query:
|
||||||
Job:
|
Job:
|
||||||
Task Count: 1
|
Task Count: 1
|
||||||
|
@@ -855,18 +901,19 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
 Merge Task Count: 1
 -- test parallel aggregates
 SET parallel_setup_cost=0;
-ERROR: unrecognized configuration parameter "parallel_setup_cost"
 SET parallel_tuple_cost=0;
-ERROR: unrecognized configuration parameter "parallel_tuple_cost"
 SET min_parallel_relation_size=0;
-ERROR: unrecognized configuration parameter "min_parallel_relation_size"
+SET min_parallel_table_scan_size=0;
+ERROR: unrecognized configuration parameter "min_parallel_table_scan_size"
 SET max_parallel_workers_per_gather=4;
-ERROR: unrecognized configuration parameter "max_parallel_workers_per_gather"
 -- ensure local plans display correctly
 CREATE TABLE lineitem_clone (LIKE lineitem);
 EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone;
-Aggregate
--> Seq Scan on lineitem_clone
+Finalize Aggregate
+-> Gather
+Workers Planned: 3
+-> Partial Aggregate
+-> Parallel Seq Scan on lineitem_clone
 -- ensure distributed plans don't break
 EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem;
 Aggregate
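The lineitem_clone hunk above captures PostgreSQL 9.6's parallel query machinery: once the parallel cost GUCs are zeroed, the plain Aggregate over a Seq Scan becomes Finalize Aggregate -> Gather -> Partial Aggregate -> Parallel Seq Scan. To reproduce the old serial shape on 9.6 or later it should be enough to forbid parallel workers; a minimal sketch reusing the test's own table:

-- Disallow parallel workers so the local aggregate plans serially again.
SET max_parallel_workers_per_gather = 0;
EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone;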
File diff suppressed because it is too large
@@ -2,6 +2,13 @@
 -- MULTI_JOIN_ORDER_ADDITIONAL
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 650000;
+-- print whether we're running on 9.5 to make version-specific tests clear
+SELECT substring(version(), '\d+(?:\.\d+)?') = '9.5' AS is_95;
+is_95
+-------
+f
+(1 row)
+
 -- Set configuration to print table join order and pruned shards
 SET citus.explain_distributed_queries TO off;
 SET citus.log_multi_join_order TO TRUE;
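The two GUCs set in this hunk drive everything the new expected file below verifies: citus.log_multi_join_order makes the planner LOG its chosen join sequence, and turning off citus.explain_distributed_queries keeps the EXPLAIN output to a stub. A condensed usage sketch against the same regression tables:

SET citus.explain_distributed_queries TO off;
SET citus.log_multi_join_order TO true;
-- Emits a line such as:
--   LOG:  join order: [ "lineitem" ][ local partition join "orders" ]
EXPLAIN SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey;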
@@ -0,0 +1,260 @@
+--
+-- MULTI_JOIN_ORDER_ADDITIONAL
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 650000;
+-- print whether we're running on 9.5 to make version-specific tests clear
+SELECT substring(version(), '\d+(?:\.\d+)?') = '9.5' AS is_95;
+is_95
+-------
+t
+(1 row)
+
+-- Set configuration to print table join order and pruned shards
+SET citus.explain_distributed_queries TO off;
+SET citus.log_multi_join_order TO TRUE;
+SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise
+SET client_min_messages TO DEBUG2;
+-- Create new table definitions for use in testing in distributed planning and
+-- execution functionality. Also create indexes to boost performance.
+CREATE TABLE lineitem_hash (
+l_orderkey bigint not null,
+l_partkey integer not null,
+l_suppkey integer not null,
+l_linenumber integer not null,
+l_quantity decimal(15, 2) not null,
+l_extendedprice decimal(15, 2) not null,
+l_discount decimal(15, 2) not null,
+l_tax decimal(15, 2) not null,
+l_returnflag char(1) not null,
+l_linestatus char(1) not null,
+l_shipdate date not null,
+l_commitdate date not null,
+l_receiptdate date not null,
+l_shipinstruct char(25) not null,
+l_shipmode char(10) not null,
+l_comment varchar(44) not null,
+PRIMARY KEY(l_orderkey, l_linenumber) );
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "lineitem_hash_pkey" for table "lineitem_hash"
+DEBUG: building index "lineitem_hash_pkey" on table "lineitem_hash"
+DEBUG: creating and filling new WAL file
+DEBUG: done creating and filling new WAL file
+SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
+master_create_distributed_table
+---------------------------------
+
+(1 row)
+
+SELECT master_create_worker_shards('lineitem_hash', 2, 1);
+master_create_worker_shards
+-----------------------------
+
+(1 row)
+
+CREATE INDEX lineitem_hash_time_index ON lineitem_hash (l_shipdate);
+DEBUG: building index "lineitem_hash_time_index" on table "lineitem_hash"
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+CREATE TABLE orders_hash (
+o_orderkey bigint not null,
+o_custkey integer not null,
+o_orderstatus char(1) not null,
+o_totalprice decimal(15,2) not null,
+o_orderdate date not null,
+o_orderpriority char(15) not null,
+o_clerk char(15) not null,
+o_shippriority integer not null,
+o_comment varchar(79) not null,
+PRIMARY KEY(o_orderkey) );
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "orders_hash_pkey" for table "orders_hash"
+DEBUG: building index "orders_hash_pkey" on table "orders_hash"
+SELECT master_create_distributed_table('orders_hash', 'o_orderkey', 'hash');
+master_create_distributed_table
+---------------------------------
+
+(1 row)
+
+SELECT master_create_worker_shards('orders_hash', 2, 1);
+master_create_worker_shards
+-----------------------------
+
+(1 row)
+
+CREATE TABLE customer_hash (
+c_custkey integer not null,
+c_name varchar(25) not null,
+c_address varchar(40) not null,
+c_nationkey integer not null,
+c_phone char(15) not null,
+c_acctbal decimal(15,2) not null,
+c_mktsegment char(10) not null,
+c_comment varchar(117) not null);
+SELECT master_create_distributed_table('customer_hash', 'c_custkey', 'hash');
+master_create_distributed_table
+---------------------------------
+
+(1 row)
+
+SELECT master_create_worker_shards('customer_hash', 2, 1);
+master_create_worker_shards
+-----------------------------
+
+(1 row)
+
+-- The following query checks that we can correctly handle self-joins
+EXPLAIN SELECT l1.l_quantity FROM lineitem l1, lineitem l2
+WHERE l1.l_orderkey = l2.l_orderkey AND l1.l_quantity > 5;
+LOG: join order: [ "lineitem" ][ local partition join "lineitem" ]
+DEBUG: join prunable for intervals [1,1509] and [2951,4455]
+DEBUG: join prunable for intervals [1,1509] and [4480,5986]
+DEBUG: join prunable for intervals [1,1509] and [8997,10560]
+DEBUG: join prunable for intervals [1,1509] and [10560,12036]
+DEBUG: join prunable for intervals [1,1509] and [12036,13473]
+DEBUG: join prunable for intervals [1,1509] and [13473,14947]
+DEBUG: join prunable for intervals [1509,4964] and [8997,10560]
+DEBUG: join prunable for intervals [1509,4964] and [10560,12036]
+DEBUG: join prunable for intervals [1509,4964] and [12036,13473]
+DEBUG: join prunable for intervals [1509,4964] and [13473,14947]
+DEBUG: join prunable for intervals [2951,4455] and [1,1509]
+DEBUG: join prunable for intervals [2951,4455] and [4480,5986]
+DEBUG: join prunable for intervals [2951,4455] and [8997,10560]
+DEBUG: join prunable for intervals [2951,4455] and [10560,12036]
+DEBUG: join prunable for intervals [2951,4455] and [12036,13473]
+DEBUG: join prunable for intervals [2951,4455] and [13473,14947]
+DEBUG: join prunable for intervals [4480,5986] and [1,1509]
+DEBUG: join prunable for intervals [4480,5986] and [2951,4455]
+DEBUG: join prunable for intervals [4480,5986] and [8997,10560]
+DEBUG: join prunable for intervals [4480,5986] and [10560,12036]
+DEBUG: join prunable for intervals [4480,5986] and [12036,13473]
+DEBUG: join prunable for intervals [4480,5986] and [13473,14947]
+DEBUG: join prunable for intervals [8997,10560] and [1,1509]
+DEBUG: join prunable for intervals [8997,10560] and [1509,4964]
+DEBUG: join prunable for intervals [8997,10560] and [2951,4455]
+DEBUG: join prunable for intervals [8997,10560] and [4480,5986]
+DEBUG: join prunable for intervals [8997,10560] and [12036,13473]
+DEBUG: join prunable for intervals [8997,10560] and [13473,14947]
+DEBUG: join prunable for intervals [10560,12036] and [1,1509]
+DEBUG: join prunable for intervals [10560,12036] and [1509,4964]
+DEBUG: join prunable for intervals [10560,12036] and [2951,4455]
+DEBUG: join prunable for intervals [10560,12036] and [4480,5986]
+DEBUG: join prunable for intervals [10560,12036] and [13473,14947]
+DEBUG: join prunable for intervals [12036,13473] and [1,1509]
+DEBUG: join prunable for intervals [12036,13473] and [1509,4964]
+DEBUG: join prunable for intervals [12036,13473] and [2951,4455]
+DEBUG: join prunable for intervals [12036,13473] and [4480,5986]
+DEBUG: join prunable for intervals [12036,13473] and [8997,10560]
+DEBUG: join prunable for intervals [13473,14947] and [1,1509]
+DEBUG: join prunable for intervals [13473,14947] and [1509,4964]
+DEBUG: join prunable for intervals [13473,14947] and [2951,4455]
+DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
+DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
+DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
+QUERY PLAN
+--------------------------------------------------------------------
+Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
+explain statements for distributed queries are not enabled
+(2 rows)
+
+-- Update configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+SET client_min_messages TO LOG;
+-- The following queries check that we correctly handle joins and OR clauses. In
+-- particular, these queries check that we factorize out OR clauses if possible,
+-- and that we default to a cartesian product otherwise.
+EXPLAIN SELECT count(*) FROM lineitem, orders
+WHERE (l_orderkey = o_orderkey AND l_quantity > 5)
+OR (l_orderkey = o_orderkey AND l_quantity < 10);
+LOG: join order: [ "lineitem" ][ local partition join "orders" ]
+QUERY PLAN
+--------------------------------------------------------------------------
+Aggregate (cost=0.00..0.00 rows=0 width=0)
+-> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
+explain statements for distributed queries are not enabled
+(3 rows)
+
+EXPLAIN SELECT l_quantity FROM lineitem, orders
+WHERE (l_orderkey = o_orderkey OR l_quantity > 5);
+LOG: join order: [ "lineitem" ][ cartesian product "orders" ]
+ERROR: cannot perform distributed planning on this query
+DETAIL: Cartesian products are currently unsupported
+-- The below queries modify the partition method in pg_dist_partition. We thus
+-- begin a transaction here so the changes don't impact any other parallel
+-- running tests.
+BEGIN;
+-- Validate that we take into account the partition method when building the
+-- join-order plan.
+EXPLAIN SELECT count(*) FROM orders, lineitem_hash
+WHERE o_orderkey = l_orderkey;
+LOG: join order: [ "orders" ][ single partition join "lineitem_hash" ]
+QUERY PLAN
+--------------------------------------------------------------------------
+Aggregate (cost=0.00..0.00 rows=0 width=0)
+-> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
+explain statements for distributed queries are not enabled
+(3 rows)
+
+-- Verify we handle local joins between two hash-partitioned tables.
+EXPLAIN SELECT count(*) FROM orders_hash, lineitem_hash
+WHERE o_orderkey = l_orderkey;
+LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ]
+QUERY PLAN
+--------------------------------------------------------------------------
+Aggregate (cost=0.00..0.00 rows=0 width=0)
+-> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
+explain statements for distributed queries are not enabled
+(3 rows)
+
+-- Validate that we can handle broadcast joins with hash-partitioned tables.
+EXPLAIN SELECT count(*) FROM customer_hash, nation
+WHERE c_nationkey = n_nationkey;
+LOG: join order: [ "customer_hash" ][ broadcast join "nation" ]
+QUERY PLAN
+--------------------------------------------------------------------------
+Aggregate (cost=0.00..0.00 rows=0 width=0)
+-> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
+explain statements for distributed queries are not enabled
+(3 rows)
+
+-- Update the large table shard count for all the following tests.
+SET citus.large_table_shard_count TO 1;
+-- Validate that we don't use a single-partition join method for a hash
+-- re-partitioned table, thus preventing a partition of just the customer table.
+EXPLAIN SELECT count(*) FROM orders, lineitem, customer
+WHERE o_custkey = l_partkey AND o_custkey = c_nationkey;
+LOG: join order: [ "orders" ][ dual partition join "lineitem" ][ dual partition join "customer" ]
+QUERY PLAN
+--------------------------------------------------------------------------
+Aggregate (cost=0.00..0.00 rows=0 width=0)
+-> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
+explain statements for distributed queries are not enabled
+(3 rows)
+
+-- Validate that we don't chose a single-partition join method with a
+-- hash-partitioned base table
+EXPLAIN SELECT count(*) FROM orders, customer_hash
+WHERE c_custkey = o_custkey;
+LOG: join order: [ "orders" ][ dual partition join "customer_hash" ]
+QUERY PLAN
+--------------------------------------------------------------------------
+Aggregate (cost=0.00..0.00 rows=0 width=0)
+-> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
+explain statements for distributed queries are not enabled
+(3 rows)
+
+-- Validate that we can re-partition a hash partitioned table to join with a
+-- range partitioned one.
+EXPLAIN SELECT count(*) FROM orders_hash, customer
+WHERE c_custkey = o_custkey;
+LOG: join order: [ "orders_hash" ][ single partition join "customer" ]
+QUERY PLAN
+--------------------------------------------------------------------------
+Aggregate (cost=0.00..0.00 rows=0 width=0)
+-> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0)
+explain statements for distributed queries are not enabled
+(3 rows)
+
+COMMIT;
+-- Reset client logging level to its previous value
+SET client_min_messages TO NOTICE;
+DROP TABLE lineitem_hash;
+DROP TABLE orders_hash;
+DROP TABLE customer_hash;
@@ -7,17 +7,18 @@
 -- executor here, as we cannot run repartition jobs with real time executor.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
 SET citus.enable_unique_job_ids TO off;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+major_version
+---------------
+10
+(1 row)
+
 BEGIN;
 SET client_min_messages TO DEBUG4;
-DEBUG: CommitTransactionCommand
 SET citus.large_table_shard_count TO 2;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 SET citus.task_executor_type TO 'task-tracker';
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 -- Debug4 log messages display jobIds within them. We explicitly set the jobId
 -- sequence here so that the regression output becomes independent of the number
 -- of jobs executed prior to running this test.
@@ -40,7 +41,6 @@ GROUP BY
 l_partkey, o_orderkey
 ORDER BY
 l_partkey, o_orderkey;
-DEBUG: StartTransactionCommand
 DEBUG: join prunable for intervals [1,1509] and [8997,14946]
 DEBUG: join prunable for intervals [1509,4964] and [8997,14946]
 DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
@@ -112,7 +112,6 @@ DEBUG: completed cleanup query for job 2
 DEBUG: completed cleanup query for job 2
 DEBUG: completed cleanup query for job 1
 DEBUG: completed cleanup query for job 1
-DEBUG: CommitTransactionCommand
 l_partkey | o_orderkey | count
 -----------+------------+-------
 18 | 12005 | 1
@@ -157,7 +156,6 @@ GROUP BY
 l_partkey, o_orderkey
 ORDER BY
 l_partkey, o_orderkey;
-DEBUG: StartTransactionCommand
 DEBUG: generated sql query for task 2
 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
 DEBUG: generated sql query for task 4
@@ -234,13 +232,10 @@ DEBUG: completed cleanup query for job 4
 DEBUG: completed cleanup query for job 4
 DEBUG: completed cleanup query for job 5
 DEBUG: completed cleanup query for job 5
-DEBUG: CommitTransactionCommand
 l_partkey | o_orderkey | count
 -----------+------------+-------
 (0 rows)

 -- Reset client logging level to its previous value
 SET client_min_messages TO NOTICE;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 COMMIT;
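Both variants of this file pin the executor to task-tracker because repartition jobs cannot run under the real-time executor, and they raise client_min_messages to DEBUG4 so per-task SQL and jobId-based pruning appear in the expected output. Distilled from the hunk above, the setup and the second test query are:

BEGIN;
SET client_min_messages TO DEBUG4;
SET citus.large_table_shard_count TO 2;
SET citus.task_executor_type TO 'task-tracker';
-- Joining on columns neither table is partitioned by forces a repartition job.
SELECT l_partkey, o_orderkey, count(*)
FROM lineitem, orders
WHERE l_suppkey = o_shippriority
  AND l_quantity < 5.0 AND o_totalprice <> 4.0
GROUP BY l_partkey, o_orderkey
ORDER BY l_partkey, o_orderkey;
COMMIT;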
@@ -0,0 +1,254 @@
+--
+-- MULTI_LARGE_TABLE_PLANNING
+--
+-- Tests that cover large table join planning. Note that we explicitly start a
+-- transaction block here so that we don't emit debug messages with changing
+-- transaction ids in them. Also, we set the executor type to task tracker
+-- executor here, as we cannot run repartition jobs with real time executor.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
+SET citus.enable_unique_job_ids TO off;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+major_version
+---------------
+9
+(1 row)
+
+BEGIN;
+SET client_min_messages TO DEBUG4;
+DEBUG: CommitTransactionCommand
+SET citus.large_table_shard_count TO 2;
+DEBUG: StartTransactionCommand
+DEBUG: ProcessUtility
+DEBUG: CommitTransactionCommand
+SET citus.task_executor_type TO 'task-tracker';
+DEBUG: StartTransactionCommand
+DEBUG: ProcessUtility
+DEBUG: CommitTransactionCommand
+-- Debug4 log messages display jobIds within them. We explicitly set the jobId
+-- sequence here so that the regression output becomes independent of the number
+-- of jobs executed prior to running this test.
+-- Multi-level repartition join to verify our projection columns are correctly
+-- referenced and propagated across multiple repartition jobs. The test also
+-- validates that only the minimal necessary projection columns are transferred
+-- between jobs.
+SELECT
+l_partkey, o_orderkey, count(*)
+FROM
+lineitem, part, orders, customer
+WHERE
+l_orderkey = o_orderkey AND
+l_partkey = p_partkey AND
+c_custkey = o_custkey AND
+(l_quantity > 5.0 OR l_extendedprice > 1200.0) AND
+p_size > 8 AND o_totalprice > 10.0 AND
+c_acctbal < 5000.0 AND l_partkey < 1000
+GROUP BY
+l_partkey, o_orderkey
+ORDER BY
+l_partkey, o_orderkey;
+DEBUG: StartTransactionCommand
+DEBUG: join prunable for intervals [1,1509] and [8997,14946]
+DEBUG: join prunable for intervals [1509,4964] and [8997,14946]
+DEBUG: join prunable for intervals [2951,4455] and [8997,14946]
+DEBUG: join prunable for intervals [4480,5986] and [8997,14946]
+DEBUG: join prunable for intervals [8997,10560] and [1,5986]
+DEBUG: join prunable for intervals [10560,12036] and [1,5986]
+DEBUG: join prunable for intervals [12036,13473] and [1,5986]
+DEBUG: join prunable for intervals [13473,14947] and [1,5986]
+DEBUG: generated sql query for task 3
+DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
+DEBUG: generated sql query for task 6
+DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
+DEBUG: generated sql query for task 9
+DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
+DEBUG: generated sql query for task 12
+DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
+DEBUG: generated sql query for task 15
+DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
+DEBUG: generated sql query for task 18
+DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
+DEBUG: generated sql query for task 21
+DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
+DEBUG: generated sql query for task 24
+DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
+DEBUG: assigned task 6 to node localhost:57637
+DEBUG: assigned task 3 to node localhost:57638
+DEBUG: assigned task 12 to node localhost:57637
+DEBUG: assigned task 9 to node localhost:57638
+DEBUG: assigned task 18 to node localhost:57637
+DEBUG: assigned task 15 to node localhost:57638
+DEBUG: assigned task 24 to node localhost:57637
+DEBUG: assigned task 21 to node localhost:57638
+DEBUG: join prunable for intervals [1,1000] and [6001,7000]
+DEBUG: join prunable for intervals [6001,7000] and [1,1000]
+DEBUG: generated sql query for task 3
+DETAIL: query string: "SELECT "pg_merge_job_0001.task_000025".intermediate_column_1_0, "pg_merge_job_0001.task_000025".intermediate_column_1_1, "pg_merge_job_0001.task_000025".intermediate_column_1_2, "pg_merge_job_0001.task_000025".intermediate_column_1_3, "pg_merge_job_0001.task_000025".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000025 "pg_merge_job_0001.task_000025" JOIN part_290011 part ON (("pg_merge_job_0001.task_000025".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
+DEBUG: generated sql query for task 6
+DETAIL: query string: "SELECT "pg_merge_job_0001.task_000034".intermediate_column_1_0, "pg_merge_job_0001.task_000034".intermediate_column_1_1, "pg_merge_job_0001.task_000034".intermediate_column_1_2, "pg_merge_job_0001.task_000034".intermediate_column_1_3, "pg_merge_job_0001.task_000034".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000034 "pg_merge_job_0001.task_000034" JOIN part_280002 part ON (("pg_merge_job_0001.task_000034".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 34
+DEBUG: assigned task 3 to node localhost:57637
+DEBUG: assigned task 6 to node localhost:57638
+DEBUG: join prunable for intervals [1,1000] and [1001,2000]
+DEBUG: join prunable for intervals [1,1000] and [6001,7000]
+DEBUG: join prunable for intervals [1001,2000] and [1,1000]
+DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
+DEBUG: join prunable for intervals [6001,7000] and [1,1000]
+DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
+DEBUG: generated sql query for task 3
+DETAIL: query string: "SELECT "pg_merge_job_0002.task_000007".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000007".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000007 "pg_merge_job_0002.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000007".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000007".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000007".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000007".intermediate_column_2_0, "pg_merge_job_0002.task_000007".intermediate_column_2_1"
+DEBUG: generated sql query for task 6
+DETAIL: query string: "SELECT "pg_merge_job_0002.task_000010".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000010".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000010 "pg_merge_job_0002.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000010".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000010".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000010".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000010".intermediate_column_2_0, "pg_merge_job_0002.task_000010".intermediate_column_2_1"
+DEBUG: generated sql query for task 9
+DETAIL: query string: "SELECT "pg_merge_job_0002.task_000013".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000013".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000013 "pg_merge_job_0002.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000013".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000013".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000013".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000013".intermediate_column_2_0, "pg_merge_job_0002.task_000013".intermediate_column_2_1"
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 7
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 7
+DETAIL: Creating dependency on merge taskId 13
+DEBUG: assigned task 6 to node localhost:57637
+DEBUG: assigned task 9 to node localhost:57638
+DEBUG: assigned task 3 to node localhost:57637
+DEBUG: completed cleanup query for job 3
+DEBUG: completed cleanup query for job 3
+DEBUG: completed cleanup query for job 2
+DEBUG: completed cleanup query for job 2
+DEBUG: completed cleanup query for job 1
+DEBUG: completed cleanup query for job 1
+DEBUG: CommitTransactionCommand
+l_partkey | o_orderkey | count
+-----------+------------+-------
+18 | 12005 | 1
+79 | 5121 | 1
+91 | 2883 | 1
+222 | 9413 | 1
+278 | 1287 | 1
+309 | 2374 | 1
+318 | 321 | 1
+321 | 5984 | 1
+337 | 10403 | 1
+350 | 13698 | 1
+358 | 4323 | 1
+364 | 9347 | 1
+416 | 640 | 1
+426 | 10855 | 1
+450 | 35 | 1
+484 | 3843 | 1
+504 | 14566 | 1
+510 | 13569 | 1
+532 | 3175 | 1
+641 | 134 | 1
+669 | 10944 | 1
+716 | 2885 | 1
+738 | 4355 | 1
+802 | 2534 | 1
+824 | 9287 | 1
+864 | 3175 | 1
+957 | 4293 | 1
+960 | 10980 | 1
+963 | 4580 | 1
+(29 rows)
+
+SELECT
+l_partkey, o_orderkey, count(*)
+FROM
+lineitem, orders
+WHERE
+l_suppkey = o_shippriority AND
+l_quantity < 5.0 AND o_totalprice <> 4.0
+GROUP BY
+l_partkey, o_orderkey
+ORDER BY
+l_partkey, o_orderkey;
+DEBUG: StartTransactionCommand
+DEBUG: generated sql query for task 2
+DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
+DEBUG: generated sql query for task 4
+DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
+DEBUG: generated sql query for task 6
+DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
+DEBUG: generated sql query for task 8
+DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
+DEBUG: generated sql query for task 10
+DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
+DEBUG: generated sql query for task 12
+DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
+DEBUG: generated sql query for task 14
+DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
+DEBUG: generated sql query for task 16
+DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
+DEBUG: assigned task 4 to node localhost:57637
+DEBUG: assigned task 2 to node localhost:57638
+DEBUG: assigned task 8 to node localhost:57637
|
||||||
|
DEBUG: assigned task 6 to node localhost:57638
|
||||||
|
DEBUG: assigned task 12 to node localhost:57637
|
||||||
|
DEBUG: assigned task 10 to node localhost:57638
|
||||||
|
DEBUG: assigned task 16 to node localhost:57637
|
||||||
|
DEBUG: assigned task 14 to node localhost:57638
|
||||||
|
DEBUG: generated sql query for task 2
|
||||||
|
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
|
||||||
|
DEBUG: generated sql query for task 4
|
||||||
|
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
|
||||||
|
DEBUG: assigned task 4 to node localhost:57637
|
||||||
|
DEBUG: assigned task 2 to node localhost:57638
|
||||||
|
DEBUG: join prunable for task partitionId 0 and 1
|
||||||
|
DEBUG: join prunable for task partitionId 0 and 2
|
||||||
|
DEBUG: join prunable for task partitionId 0 and 3
|
||||||
|
DEBUG: join prunable for task partitionId 1 and 0
|
||||||
|
DEBUG: join prunable for task partitionId 1 and 2
|
||||||
|
DEBUG: join prunable for task partitionId 1 and 3
|
||||||
|
DEBUG: join prunable for task partitionId 2 and 0
|
||||||
|
DEBUG: join prunable for task partitionId 2 and 1
|
||||||
|
DEBUG: join prunable for task partitionId 2 and 3
|
||||||
|
DEBUG: join prunable for task partitionId 3 and 0
|
||||||
|
DEBUG: join prunable for task partitionId 3 and 1
|
||||||
|
DEBUG: join prunable for task partitionId 3 and 2
|
||||||
|
DEBUG: generated sql query for task 3
|
||||||
|
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000017".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000005".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000017 "pg_merge_job_0004.task_000017" JOIN pg_merge_job_0005.task_000005 "pg_merge_job_0005.task_000005" ON (("pg_merge_job_0004.task_000017".intermediate_column_4_1 = "pg_merge_job_0005.task_000005".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000017".intermediate_column_4_0, "pg_merge_job_0005.task_000005".intermediate_column_5_0"
|
||||||
|
DEBUG: generated sql query for task 6
|
||||||
|
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000026".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000008".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000026 "pg_merge_job_0004.task_000026" JOIN pg_merge_job_0005.task_000008 "pg_merge_job_0005.task_000008" ON (("pg_merge_job_0004.task_000026".intermediate_column_4_1 = "pg_merge_job_0005.task_000008".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000026".intermediate_column_4_0, "pg_merge_job_0005.task_000008".intermediate_column_5_0"
|
||||||
|
DEBUG: generated sql query for task 9
|
||||||
|
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000035".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000011".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000035 "pg_merge_job_0004.task_000035" JOIN pg_merge_job_0005.task_000011 "pg_merge_job_0005.task_000011" ON (("pg_merge_job_0004.task_000035".intermediate_column_4_1 = "pg_merge_job_0005.task_000011".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000035".intermediate_column_4_0, "pg_merge_job_0005.task_000011".intermediate_column_5_0"
|
||||||
|
DEBUG: generated sql query for task 12
|
||||||
|
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000044".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000014".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000044 "pg_merge_job_0004.task_000044" JOIN pg_merge_job_0005.task_000014 "pg_merge_job_0005.task_000014" ON (("pg_merge_job_0004.task_000044".intermediate_column_4_1 = "pg_merge_job_0005.task_000014".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000044".intermediate_column_4_0, "pg_merge_job_0005.task_000014".intermediate_column_5_0"
|
||||||
|
DEBUG: pruning merge fetch taskId 1
|
||||||
|
DETAIL: Creating dependency on merge taskId 17
|
||||||
|
DEBUG: pruning merge fetch taskId 2
|
||||||
|
DETAIL: Creating dependency on merge taskId 5
|
||||||
|
DEBUG: pruning merge fetch taskId 4
|
||||||
|
DETAIL: Creating dependency on merge taskId 26
|
||||||
|
DEBUG: pruning merge fetch taskId 5
|
||||||
|
DETAIL: Creating dependency on merge taskId 8
|
||||||
|
DEBUG: pruning merge fetch taskId 7
|
||||||
|
DETAIL: Creating dependency on merge taskId 35
|
||||||
|
DEBUG: pruning merge fetch taskId 8
|
||||||
|
DETAIL: Creating dependency on merge taskId 11
|
||||||
|
DEBUG: pruning merge fetch taskId 10
|
||||||
|
DETAIL: Creating dependency on merge taskId 44
|
||||||
|
DEBUG: pruning merge fetch taskId 11
|
||||||
|
DETAIL: Creating dependency on merge taskId 14
|
||||||
|
DEBUG: assigned task 3 to node localhost:57637
|
||||||
|
DEBUG: assigned task 6 to node localhost:57638
|
||||||
|
DEBUG: assigned task 9 to node localhost:57637
|
||||||
|
DEBUG: assigned task 12 to node localhost:57638
|
||||||
|
DEBUG: completed cleanup query for job 6
|
||||||
|
DEBUG: completed cleanup query for job 6
|
||||||
|
DEBUG: completed cleanup query for job 4
|
||||||
|
DEBUG: completed cleanup query for job 4
|
||||||
|
DEBUG: completed cleanup query for job 5
|
||||||
|
DEBUG: completed cleanup query for job 5
|
||||||
|
DEBUG: CommitTransactionCommand
|
||||||
|
l_partkey | o_orderkey | count
|
||||||
|
-----------+------------+-------
|
||||||
|
(0 rows)
|
||||||
|
|
||||||
|
-- Reset client logging level to its previous value
|
||||||
|
SET client_min_messages TO NOTICE;
|
||||||
|
DEBUG: StartTransactionCommand
|
||||||
|
DEBUG: ProcessUtility
|
||||||
|
COMMIT;
|
|
@@ -6,17 +6,18 @@
 -- from a sql task to its depended tasks. Note that we set the executor type to task
 -- tracker executor here, as we cannot run repartition jobs with real time executor.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 10
+(1 row)
+
 BEGIN;
 SET client_min_messages TO DEBUG3;
-DEBUG: CommitTransactionCommand
 SET citus.large_table_shard_count TO 2;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 SET citus.task_executor_type TO 'task-tracker';
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 -- Single range repartition join to test anchor-shard based task assignment and
 -- assignment propagation to merge and data-fetch tasks.
 SELECT
@@ -25,7 +26,6 @@ FROM
 orders, customer
 WHERE
 o_custkey = c_custkey;
-DEBUG: StartTransactionCommand
 DEBUG: assigned task 4 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
 DEBUG: join prunable for intervals [1,1000] and [1001,2000]
@@ -43,7 +43,6 @@ DETAIL: Creating dependency on merge taskId 11
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 9 to node localhost:57638
 DEBUG: assigned task 3 to node localhost:57637
-DEBUG: CommitTransactionCommand
 count
 -------
 2984
@@ -54,9 +53,6 @@ DEBUG: CommitTransactionCommand
 -- the same merge task, and tests our constraint group creation and assignment
 -- propagation. Here 'orders' is considered the small table.
 SET citus.large_table_shard_count TO 3;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 SELECT
 count(*)
 FROM
@@ -64,7 +60,6 @@ FROM
 WHERE
 o_custkey = c_custkey AND
 o_orderkey = l_orderkey;
-DEBUG: StartTransactionCommand
 DEBUG: assigned task 9 to node localhost:57637
 DEBUG: assigned task 15 to node localhost:57638
 DEBUG: assigned task 12 to node localhost:57637
@@ -175,16 +170,12 @@ DEBUG: propagating assignment from merge task 54 to constrained sql task 45
 DEBUG: propagating assignment from merge task 61 to constrained sql task 51
 DEBUG: propagating assignment from merge task 61 to constrained sql task 54
 DEBUG: propagating assignment from merge task 68 to constrained sql task 60
-DEBUG: CommitTransactionCommand
 count
 -------
 11998
 (1 row)

 SET citus.large_table_shard_count TO 2;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 -- Dual hash repartition join which tests the separate hash repartition join
 -- task assignment algorithm.
 SELECT
@@ -193,7 +184,6 @@ FROM
 lineitem, customer
 WHERE
 l_partkey = c_nationkey;
-DEBUG: StartTransactionCommand
 DEBUG: assigned task 4 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
 DEBUG: assigned task 8 to node localhost:57637
@@ -237,7 +227,6 @@ DEBUG: assigned task 3 to node localhost:57638
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 9 to node localhost:57638
 DEBUG: assigned task 12 to node localhost:57637
-DEBUG: CommitTransactionCommand
 count
 -------
 125
@@ -245,6 +234,4 @@ DEBUG: CommitTransactionCommand

 -- Reset client logging level to its previous value
 SET client_min_messages TO NOTICE;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
 COMMIT;
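The version probe added in the first hunk above works because psql's \gset sends the preceding query and stores each column of its single-row result into a like-named psql variable, and two-argument substring(text, pattern) returns the first POSIX regular-expression match. The leading run of digits is therefore the major version on both release lines, which is what lets a single test script select per-version expected output. A quick illustration (the literal version strings are examples only):

SELECT substring('9.6.3', '\d+') AS major_version;  -- returns 9
SELECT substring('10.1', '\d+') AS major_version;   -- returns 10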
@@ -0,0 +1,258 @@
+--
+-- MULTI_LARGE_TABLE_TASK_ASSIGNMENT
+--
+-- Tests which cover task assignment for MapMerge jobs for single range repartition
+-- and dual hash repartition joins. The tests also cover task assignment propagation
+-- from a sql task to its depended tasks. Note that we set the executor type to task
+-- tracker executor here, as we cannot run repartition jobs with real time executor.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 9
+(1 row)
+
+BEGIN;
+SET client_min_messages TO DEBUG3;
+DEBUG: CommitTransactionCommand
+SET citus.large_table_shard_count TO 2;
+DEBUG: StartTransactionCommand
+DEBUG: ProcessUtility
+DEBUG: CommitTransactionCommand
+SET citus.task_executor_type TO 'task-tracker';
+DEBUG: StartTransactionCommand
+DEBUG: ProcessUtility
+DEBUG: CommitTransactionCommand
+-- Single range repartition join to test anchor-shard based task assignment and
+-- assignment propagation to merge and data-fetch tasks.
+SELECT
+count(*)
+FROM
+orders, customer
+WHERE
+o_custkey = c_custkey;
+DEBUG: StartTransactionCommand
+DEBUG: assigned task 4 to node localhost:57637
+DEBUG: assigned task 2 to node localhost:57638
+DEBUG: join prunable for intervals [1,1000] and [1001,2000]
+DEBUG: join prunable for intervals [1,1000] and [6001,7000]
+DEBUG: join prunable for intervals [1001,2000] and [1,1000]
+DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
+DEBUG: join prunable for intervals [6001,7000] and [1,1000]
+DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 5
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 7
+DETAIL: Creating dependency on merge taskId 11
+DEBUG: assigned task 6 to node localhost:57637
+DEBUG: assigned task 9 to node localhost:57638
+DEBUG: assigned task 3 to node localhost:57637
+DEBUG: CommitTransactionCommand
+ count
+-------
+ 2984
+(1 row)
+
+-- Single range repartition join, along with a join with a small table containing
+-- more than one shard. This situation results in multiple sql tasks depending on
+-- the same merge task, and tests our constraint group creation and assignment
+-- propagation. Here 'orders' is considered the small table.
+SET citus.large_table_shard_count TO 3;
+DEBUG: StartTransactionCommand
+DEBUG: ProcessUtility
+DEBUG: CommitTransactionCommand
+SELECT
+count(*)
+FROM
+orders, customer, lineitem
+WHERE
+o_custkey = c_custkey AND
+o_orderkey = l_orderkey;
+DEBUG: StartTransactionCommand
+DEBUG: assigned task 9 to node localhost:57637
+DEBUG: assigned task 15 to node localhost:57638
+DEBUG: assigned task 12 to node localhost:57637
+DEBUG: assigned task 18 to node localhost:57638
+DEBUG: assigned task 3 to node localhost:57637
+DEBUG: assigned task 6 to node localhost:57638
+DEBUG: join prunable for intervals [1,1509] and [2951,4455]
+DEBUG: join prunable for intervals [1,1509] and [4480,5986]
+DEBUG: join prunable for intervals [1,1509] and [8997,10560]
+DEBUG: join prunable for intervals [1,1509] and [10560,12036]
+DEBUG: join prunable for intervals [1,1509] and [12036,13473]
+DEBUG: join prunable for intervals [1,1509] and [13473,14947]
+DEBUG: join prunable for intervals [1509,4964] and [8997,10560]
+DEBUG: join prunable for intervals [1509,4964] and [10560,12036]
+DEBUG: join prunable for intervals [1509,4964] and [12036,13473]
+DEBUG: join prunable for intervals [1509,4964] and [13473,14947]
+DEBUG: join prunable for intervals [2951,4455] and [1,1509]
+DEBUG: join prunable for intervals [2951,4455] and [4480,5986]
+DEBUG: join prunable for intervals [2951,4455] and [8997,10560]
+DEBUG: join prunable for intervals [2951,4455] and [10560,12036]
+DEBUG: join prunable for intervals [2951,4455] and [12036,13473]
+DEBUG: join prunable for intervals [2951,4455] and [13473,14947]
+DEBUG: join prunable for intervals [4480,5986] and [1,1509]
+DEBUG: join prunable for intervals [4480,5986] and [2951,4455]
+DEBUG: join prunable for intervals [4480,5986] and [8997,10560]
+DEBUG: join prunable for intervals [4480,5986] and [10560,12036]
+DEBUG: join prunable for intervals [4480,5986] and [12036,13473]
+DEBUG: join prunable for intervals [4480,5986] and [13473,14947]
+DEBUG: join prunable for intervals [8997,10560] and [1,1509]
+DEBUG: join prunable for intervals [8997,10560] and [1509,4964]
+DEBUG: join prunable for intervals [8997,10560] and [2951,4455]
+DEBUG: join prunable for intervals [8997,10560] and [4480,5986]
+DEBUG: join prunable for intervals [8997,10560] and [12036,13473]
+DEBUG: join prunable for intervals [8997,10560] and [13473,14947]
+DEBUG: join prunable for intervals [10560,12036] and [1,1509]
+DEBUG: join prunable for intervals [10560,12036] and [1509,4964]
+DEBUG: join prunable for intervals [10560,12036] and [2951,4455]
+DEBUG: join prunable for intervals [10560,12036] and [4480,5986]
+DEBUG: join prunable for intervals [10560,12036] and [13473,14947]
+DEBUG: join prunable for intervals [12036,13473] and [1,1509]
+DEBUG: join prunable for intervals [12036,13473] and [1509,4964]
+DEBUG: join prunable for intervals [12036,13473] and [2951,4455]
+DEBUG: join prunable for intervals [12036,13473] and [4480,5986]
+DEBUG: join prunable for intervals [12036,13473] and [8997,10560]
+DEBUG: join prunable for intervals [13473,14947] and [1,1509]
+DEBUG: join prunable for intervals [13473,14947] and [1509,4964]
+DEBUG: join prunable for intervals [13473,14947] and [2951,4455]
+DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
+DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
+DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 19
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 19
+DEBUG: pruning merge fetch taskId 7
+DETAIL: Creating dependency on merge taskId 26
+DEBUG: pruning merge fetch taskId 10
+DETAIL: Creating dependency on merge taskId 26
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 26
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 26
+DEBUG: pruning merge fetch taskId 19
+DETAIL: Creating dependency on merge taskId 33
+DEBUG: pruning merge fetch taskId 22
+DETAIL: Creating dependency on merge taskId 33
+DEBUG: pruning merge fetch taskId 25
+DETAIL: Creating dependency on merge taskId 40
+DEBUG: pruning merge fetch taskId 28
+DETAIL: Creating dependency on merge taskId 40
+DEBUG: pruning merge fetch taskId 31
+DETAIL: Creating dependency on merge taskId 47
+DEBUG: pruning merge fetch taskId 34
+DETAIL: Creating dependency on merge taskId 47
+DEBUG: pruning merge fetch taskId 37
+DETAIL: Creating dependency on merge taskId 54
+DEBUG: pruning merge fetch taskId 40
+DETAIL: Creating dependency on merge taskId 54
+DEBUG: pruning merge fetch taskId 43
+DETAIL: Creating dependency on merge taskId 54
+DEBUG: pruning merge fetch taskId 46
+DETAIL: Creating dependency on merge taskId 61
+DEBUG: pruning merge fetch taskId 49
+DETAIL: Creating dependency on merge taskId 61
+DEBUG: pruning merge fetch taskId 52
+DETAIL: Creating dependency on merge taskId 61
+DEBUG: pruning merge fetch taskId 55
+DETAIL: Creating dependency on merge taskId 68
+DEBUG: pruning merge fetch taskId 58
+DETAIL: Creating dependency on merge taskId 68
+DEBUG: assigned task 21 to node localhost:57637
+DEBUG: assigned task 3 to node localhost:57638
+DEBUG: assigned task 27 to node localhost:57637
+DEBUG: assigned task 9 to node localhost:57638
+DEBUG: assigned task 48 to node localhost:57637
+DEBUG: assigned task 33 to node localhost:57638
+DEBUG: assigned task 39 to node localhost:57637
+DEBUG: assigned task 57 to node localhost:57638
+DEBUG: propagating assignment from merge task 19 to constrained sql task 6
+DEBUG: propagating assignment from merge task 26 to constrained sql task 12
+DEBUG: propagating assignment from merge task 26 to constrained sql task 15
+DEBUG: propagating assignment from merge task 26 to constrained sql task 18
+DEBUG: propagating assignment from merge task 33 to constrained sql task 24
+DEBUG: propagating assignment from merge task 40 to constrained sql task 30
+DEBUG: propagating assignment from merge task 47 to constrained sql task 36
+DEBUG: propagating assignment from merge task 54 to constrained sql task 42
+DEBUG: propagating assignment from merge task 54 to constrained sql task 45
+DEBUG: propagating assignment from merge task 61 to constrained sql task 51
+DEBUG: propagating assignment from merge task 61 to constrained sql task 54
+DEBUG: propagating assignment from merge task 68 to constrained sql task 60
+DEBUG: CommitTransactionCommand
+ count
+-------
+ 11998
+(1 row)
+
+SET citus.large_table_shard_count TO 2;
+DEBUG: StartTransactionCommand
+DEBUG: ProcessUtility
+DEBUG: CommitTransactionCommand
+-- Dual hash repartition join which tests the separate hash repartition join
+-- task assignment algorithm.
+SELECT
+count(*)
+FROM
+lineitem, customer
+WHERE
+l_partkey = c_nationkey;
+DEBUG: StartTransactionCommand
+DEBUG: assigned task 4 to node localhost:57637
+DEBUG: assigned task 2 to node localhost:57638
+DEBUG: assigned task 8 to node localhost:57637
+DEBUG: assigned task 6 to node localhost:57638
+DEBUG: assigned task 12 to node localhost:57637
+DEBUG: assigned task 10 to node localhost:57638
+DEBUG: assigned task 16 to node localhost:57637
+DEBUG: assigned task 14 to node localhost:57638
+DEBUG: assigned task 4 to node localhost:57637
+DEBUG: assigned task 6 to node localhost:57638
+DEBUG: assigned task 2 to node localhost:57637
+DEBUG: join prunable for task partitionId 0 and 1
+DEBUG: join prunable for task partitionId 0 and 2
+DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 1 and 0
+DEBUG: join prunable for task partitionId 1 and 2
+DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 2 and 0
+DEBUG: join prunable for task partitionId 2 and 1
+DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 3 and 0
+DEBUG: join prunable for task partitionId 3 and 1
+DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 17
+DEBUG: pruning merge fetch taskId 2
+DETAIL: Creating dependency on merge taskId 7
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 26
+DEBUG: pruning merge fetch taskId 5
+DETAIL: Creating dependency on merge taskId 11
+DEBUG: pruning merge fetch taskId 7
+DETAIL: Creating dependency on merge taskId 35
+DEBUG: pruning merge fetch taskId 8
+DETAIL: Creating dependency on merge taskId 15
+DEBUG: pruning merge fetch taskId 10
+DETAIL: Creating dependency on merge taskId 44
+DEBUG: pruning merge fetch taskId 11
+DETAIL: Creating dependency on merge taskId 19
+DEBUG: assigned task 3 to node localhost:57638
+DEBUG: assigned task 6 to node localhost:57637
+DEBUG: assigned task 9 to node localhost:57638
+DEBUG: assigned task 12 to node localhost:57637
+DEBUG: CommitTransactionCommand
+ count
+-------
+ 125
+(1 row)
+
+-- Reset client logging level to its previous value
+SET client_min_messages TO NOTICE;
+DEBUG: StartTransactionCommand
+DEBUG: ProcessUtility
+COMMIT;
@@ -240,16 +240,27 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
 1310007 | 1 | 0 | localhost | 57638 | 100007
 (8 rows)

-\d mx_testing_schema.mx_test_table
-Table "mx_testing_schema.mx_test_table"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
 Column | Type | Modifiers
 --------+---------+---------------------------------------------------------------------------------
 col_1 | integer |
 col_2 | text | not null
 col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass)
-Indexes:
-"mx_test_table_col_1_key" UNIQUE CONSTRAINT, btree (col_1)
-"mx_index" btree (col_2)
+(3 rows)
+
+\d mx_testing_schema.mx_test_table_col_1_key
+Index "mx_testing_schema.mx_test_table_col_1_key"
+ Column | Type | Definition
+--------+---------+------------
+ col_1 | integer | col_1
+unique, btree, for table "mx_testing_schema.mx_test_table"
+
+\d mx_testing_schema.mx_index
+Index "mx_testing_schema.mx_index"
+ Column | Type | Definition
+--------+------+------------
+ col_2 | text | col_2
+btree, for table "mx_testing_schema.mx_test_table"
+
 -- Check that pg_dist_colocation is not synced
 SELECT * FROM pg_dist_colocation ORDER BY colocationid;
@@ -295,15 +306,11 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port);

 -- Check that foreign key metadata exists on the worker
 \c - - - :worker_1_port
-\d mx_testing_schema_2.fk_test_2
-Table "mx_testing_schema_2.fk_test_2"
- Column | Type | Modifiers
---------+---------+-----------
- col1 | integer |
- col2 | integer |
- col3 | text |
-Foreign-key constraints:
-"fk_test_2_col1_fkey" FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass;
+ Constraint | Definition
+---------------------+-----------------------------------------------------------------------------
+ fk_test_2_col1_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
+(1 row)
+
 \c - - - :master_port
 DROP TABLE mx_testing_schema_2.fk_test_2;
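The hunks above swap psql's \d, whose table output changed shape in PostgreSQL 10, for a table_desc helper view that pins the pre-10 "Column | Type | Modifiers" layout. The view's definition is not shown in this part of the diff; a minimal sketch of how such a view can be assembled from the system catalogs, with the column names and relid filter taken from the usage above and the catalog joins assumed, would be:

CREATE VIEW table_desc AS
SELECT a.attrelid AS relid,
       a.attname AS "Column",
       format_type(a.atttypid, a.atttypmod) AS "Type",
       -- reproduce the old "not null default ..." modifier text
       rtrim(CASE WHEN a.attnotnull THEN 'not null ' ELSE '' END ||
             CASE WHEN d.adbin IS NOT NULL
                  THEN 'default ' || pg_get_expr(d.adbin, d.adrelid)
                  ELSE '' END) AS "Modifiers"
FROM pg_attribute a
LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
WHERE a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attrelid, a.attnum;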
@@ -370,16 +377,27 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
 1310007 | 1 | 0 | localhost | 57638 | 100007
 (8 rows)

-\d mx_testing_schema.mx_test_table
-Table "mx_testing_schema.mx_test_table"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
 Column | Type | Modifiers
 --------+---------+---------------------------------------------------------------------------------
 col_1 | integer |
 col_2 | text | not null
 col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass)
-Indexes:
-"mx_test_table_col_1_key" UNIQUE CONSTRAINT, btree (col_1)
-"mx_index" btree (col_2)
+(3 rows)
+
+\d mx_testing_schema.mx_test_table_col_1_key
+Index "mx_testing_schema.mx_test_table_col_1_key"
+ Column | Type | Definition
+--------+---------+------------
+ col_1 | integer | col_1
+unique, btree, for table "mx_testing_schema.mx_test_table"
+
+\d mx_testing_schema.mx_index
+Index "mx_testing_schema.mx_index"
+ Column | Type | Definition
+--------+------+------------
+ col_2 | text | col_2
+btree, for table "mx_testing_schema.mx_test_table"
+
 SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;
 count
@@ -499,28 +517,46 @@ CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 (col1);
 CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text);
 CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2);
 ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1);
-\d mx_test_schema_1.mx_table_1
-Table "mx_test_schema_1.mx_table_1"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
 Column | Type | Modifiers
 --------+---------+-----------
 col1 | integer |
 col2 | text |
-Indexes:
-"mx_table_1_col1_key" UNIQUE CONSTRAINT, btree (col1)
-"mx_index_1" btree (col1)
-Referenced by:
-TABLE "mx_test_schema_2.mx_table_2" CONSTRAINT "mx_fk_constraint" FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
-
-\d mx_test_schema_2.mx_table_2
-Table "mx_test_schema_2.mx_table_2"
+(2 rows)
+
+\d mx_test_schema_1.mx_table_1_col1_key
+Index "mx_test_schema_1.mx_table_1_col1_key"
+ Column | Type | Definition
+--------+---------+------------
+ col1 | integer | col1
+unique, btree, for table "mx_test_schema_1.mx_table_1"
+
+\d mx_test_schema_1.mx_index_1
+Index "mx_test_schema_1.mx_index_1"
+ Column | Type | Definition
+--------+---------+------------
+ col1 | integer | col1
+btree, for table "mx_test_schema_1.mx_table_1"
+
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass;
 Column | Type | Modifiers
 --------+---------+-----------
 col1 | integer |
 col2 | text |
-Indexes:
-"mx_index_2" btree (col2)
-Foreign-key constraints:
-"mx_fk_constraint" FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
+(2 rows)
+
+\d mx_test_schema_2.mx_index_2
+Index "mx_test_schema_2.mx_index_2"
+ Column | Type | Definition
+--------+------+------------
+ col2 | text | col2
+btree, for table "mx_test_schema_2.mx_table_2"
+
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass;
+ Constraint | Definition
+------------------+-----------------------------------------------------------------
+ mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
+(1 row)
+
 SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1');
 create_distributed_table
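table_fkeys plays the same compatibility role for the "Foreign-key constraints:" section of the old \d output. Under the same caveat that the shipped definition is not visible in this hunk, a sketch is straightforward, because pg_get_constraintdef() already renders exactly the "FOREIGN KEY (...) REFERENCES ..." text seen in the expected output:

CREATE VIEW table_fkeys AS
SELECT c.conrelid AS relid,
       c.conname AS "Constraint",
       pg_get_constraintdef(c.oid) AS "Definition"
FROM pg_constraint c
WHERE c.contype = 'f';  -- 'f' marks foreign-key constraints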
@@ -578,28 +614,13 @@ ORDER BY
 -- Check that metadata of MX tables exist on the metadata worker
 \c - - - :worker_1_port
 -- Check that tables are created
-\d mx_test_schema_1.mx_table_1
-Table "mx_test_schema_1.mx_table_1"
- Column | Type | Modifiers
---------+---------+-----------
- col1 | integer |
- col2 | text |
-Indexes:
-"mx_table_1_col1_key" UNIQUE CONSTRAINT, btree (col1)
-"mx_index_1" btree (col1)
-Referenced by:
-TABLE "mx_test_schema_2.mx_table_2" CONSTRAINT "mx_fk_constraint" FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
-
-\d mx_test_schema_2.mx_table_2
-Table "mx_test_schema_2.mx_table_2"
- Column | Type | Modifiers
---------+---------+-----------
- col1 | integer |
- col2 | text |
-Indexes:
-"mx_index_2" btree (col2)
-Foreign-key constraints:
-"mx_fk_constraint" FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
+\dt mx_test_schema_?.mx_table_?
+List of relations
+ Schema | Name | Type | Owner
+------------------+------------+-------+----------
+ mx_test_schema_1 | mx_table_1 | table | postgres
+ mx_test_schema_2 | mx_table_2 | table | postgres
+(2 rows)

 -- Check that table metadata are created
 SELECT
@@ -663,38 +684,28 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
 SET citus.multi_shard_commit_protocol TO '2pc';
 SET client_min_messages TO 'ERROR';
 CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1);
-CREATE UNIQUE INDEX mx_index_4 ON mx_test_schema_2.mx_table_2(col1);
+ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1);
 \c - - - :worker_1_port
-\d mx_test_schema_2.mx_table_2
-Table "mx_test_schema_2.mx_table_2"
- Column | Type | Modifiers
---------+---------+-----------
- col1 | integer |
- col2 | text |
-Indexes:
-"mx_index_4" UNIQUE, btree (col1)
-"mx_index_2" btree (col2)
-"mx_index_3" hash (col1)
-Foreign-key constraints:
-"mx_fk_constraint" FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
+\d mx_test_schema_2.mx_index_3
+Index "mx_test_schema_2.mx_index_3"
+ Column | Type | Definition
+--------+---------+------------
+ col1 | integer | col1
+hash, for table "mx_test_schema_2.mx_table_2"
+
+\d mx_test_schema_2.mx_table_2_col1_key
+Index "mx_test_schema_2.mx_table_2_col1_key"
+ Column | Type | Definition
+--------+---------+------------
+ col1 | integer | col1
+unique, btree, for table "mx_test_schema_2.mx_table_2"
+
 -- Check that DROP INDEX statement is propagated
 \c - - - :master_port
 SET citus.multi_shard_commit_protocol TO '2pc';
 DROP INDEX mx_test_schema_2.mx_index_3;
 \c - - - :worker_1_port
-\d mx_test_schema_2.mx_table_2
-Table "mx_test_schema_2.mx_table_2"
- Column | Type | Modifiers
---------+---------+-----------
- col1 | integer |
- col2 | text |
-Indexes:
-"mx_index_4" UNIQUE, btree (col1)
-"mx_index_2" btree (col2)
-Foreign-key constraints:
-"mx_fk_constraint" FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
+\d mx_test_schema_2.mx_index_3

 -- Check that ALTER TABLE statements are propagated
 \c - - - :master_port
 SET citus.multi_shard_commit_protocol TO '2pc';
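The hunk above also replaces CREATE UNIQUE INDEX with ALTER TABLE ... ADD CONSTRAINT ... UNIQUE, presumably so the backing index carries the stable, constraint-derived name that the subsequent \d on the index relies on. Both forms build a unique btree index; the constraint form additionally records a pg_constraint row and names the index after the constraint. A throwaway illustration (table and object names are hypothetical):

CREATE TABLE uniq_demo (col1 int);
CREATE UNIQUE INDEX uniq_demo_col1_idx ON uniq_demo (col1);            -- index only
ALTER TABLE uniq_demo ADD CONSTRAINT uniq_demo_col1_key UNIQUE (col1); -- constraint plus index "uniq_demo_col1_key"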
@@ -709,20 +720,19 @@ FOREIGN KEY
 REFERENCES
 mx_test_schema_2.mx_table_2(col1);
 \c - - - :worker_1_port
-\d mx_test_schema_1.mx_table_1
-Table "mx_test_schema_1.mx_table_1"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
 Column | Type | Modifiers
 --------+---------+-----------
 col1 | integer |
 col2 | text |
 col3 | integer |
-Indexes:
-"mx_table_1_col1_key" UNIQUE CONSTRAINT, btree (col1)
-"mx_index_1" btree (col1)
-Foreign-key constraints:
-"mx_fk_constraint" FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1)
-Referenced by:
-TABLE "mx_test_schema_2.mx_table_2" CONSTRAINT "mx_fk_constraint" FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
+(3 rows)
+
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
+ Constraint | Definition
+------------------+-----------------------------------------------------------------
+ mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1)
+(1 row)

 -- Check that foreign key constraint with NOT VALID works as well
 \c - - - :master_port
@@ -738,20 +748,11 @@ REFERENCES
 mx_test_schema_2.mx_table_2(col1)
 NOT VALID;
 \c - - - :worker_1_port
-\d mx_test_schema_1.mx_table_1
-Table "mx_test_schema_1.mx_table_1"
- Column | Type | Modifiers
---------+---------+-----------
- col1 | integer |
- col2 | text |
- col3 | integer |
-Indexes:
-"mx_table_1_col1_key" UNIQUE CONSTRAINT, btree (col1)
-"mx_index_1" btree (col1)
-Foreign-key constraints:
-"mx_fk_constraint_2" FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) NOT VALID
-Referenced by:
-TABLE "mx_test_schema_2.mx_table_2" CONSTRAINT "mx_fk_constraint" FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1)
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
+ Constraint | Definition
+--------------------+-----------------------------------------------------------------
+ mx_fk_constraint_2 | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1)
+(1 row)

 -- Check that mark_tables_colocated call propagates the changes to the workers
 \c - - - :master_port
@@ -932,13 +933,13 @@ SELECT create_distributed_table('mx_table_with_sequence', 'a');

 (1 row)

-\d mx_table_with_sequence
-Table "public.mx_table_with_sequence"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
 Column | Type | Modifiers
 --------+---------+--------------------------------------------------------------------
 a | integer |
 b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass)
 c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass)
+(3 rows)

 \ds mx_table_with_sequence_b_seq
 List of relations
@@ -956,13 +957,13 @@ SELECT create_distributed_table('mx_table_with_sequence', 'a');

 -- Check that the sequences created on the metadata worker as well
 \c - - - :worker_1_port
-\d mx_table_with_sequence
-Table "public.mx_table_with_sequence"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
 Column | Type | Modifiers
 --------+---------+--------------------------------------------------------------------
 a | integer |
 b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass)
 c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass)
+(3 rows)

 \ds mx_table_with_sequence_b_seq
 List of relations
@@ -1006,13 +1007,13 @@ SELECT groupid FROM pg_dist_local_group;
 2
 (1 row)

-\d mx_table_with_sequence
-Table "public.mx_table_with_sequence"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
 Column | Type | Modifiers
 --------+---------+--------------------------------------------------------------------
 a | integer |
 b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass)
 c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass)
+(3 rows)

 \ds mx_table_with_sequence_b_seq
 List of relations
@@ -1204,20 +1205,20 @@ SELECT create_reference_table('mx_ref');

 (1 row)

-\d mx_ref
-Table "public.mx_ref"
- Column | Type | Modifiers
---------+---------+-----------
- col_1 | integer |
- col_2 | text |
+\dt mx_ref
+List of relations
+ Schema | Name | Type | Owner
+--------+--------+-------+----------
+ public | mx_ref | table | postgres
+(1 row)

 \c - - - :worker_1_port
-\d mx_ref
-Table "public.mx_ref"
- Column | Type | Modifiers
---------+---------+-----------
- col_1 | integer |
- col_2 | text |
+\dt mx_ref
+List of relations
+ Schema | Name | Type | Owner
+--------+--------+-------+----------
+ public | mx_ref | table | postgres
+(1 row)

 SELECT
 logicalrelid, partmethod, repmodel, shardid, placementid, nodename, nodeport
@@ -1243,26 +1244,36 @@ ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0;
 NOTICE: using one-phase commit for distributed DDL commands
 HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 CREATE INDEX mx_ref_index ON mx_ref(col_1);
-\d mx_ref
-Table "public.mx_ref"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;
 Column | Type | Modifiers
 --------+---------+-----------
 col_1 | integer |
 col_2 | text |
 col_3 | numeric | default 0
-Indexes:
-"mx_ref_index" btree (col_1)
+(3 rows)
+
+\d mx_ref_index
+Index "public.mx_ref_index"
+ Column | Type | Definition
+--------+---------+------------
+ col_1 | integer | col_1
+btree, for table "public.mx_ref"
+
 \c - - - :worker_1_port
-\d mx_ref
-Table "public.mx_ref"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;
 Column | Type | Modifiers
 --------+---------+-----------
 col_1 | integer |
 col_2 | text |
 col_3 | numeric | default 0
-Indexes:
-"mx_ref_index" btree (col_1)
+(3 rows)
+
+\d mx_ref_index
+Index "public.mx_ref_index"
+ Column | Type | Definition
+--------+---------+------------
+ col_1 | integer | col_1
+btree, for table "public.mx_ref"
+

 -- Check that metada is cleaned successfully upon drop table
|
@ -185,12 +185,12 @@ INSERT INTO labs VALUES (6, 'Bell Labs');
|
||||||
ERROR: single-shard DML commands must not appear in transaction blocks which contain multi-shard data modifications
|
ERROR: single-shard DML commands must not appear in transaction blocks which contain multi-shard data modifications
|
||||||
COMMIT;
|
COMMIT;
|
||||||
-- but the DDL should correctly roll back
|
-- but the DDL should correctly roll back
|
||||||
\d labs
|
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.labs'::regclass;
|
||||||
Table "public.labs"
|
|
||||||
Column | Type | Modifiers
|
Column | Type | Modifiers
|
||||||
--------+--------+-----------
|
--------+--------+-----------
|
||||||
id | bigint | not null
|
id | bigint | not null
|
||||||
name | text | not null
|
name | text | not null
|
||||||
|
(2 rows)
|
||||||
|
|
||||||
SELECT * FROM labs WHERE id = 6;
|
SELECT * FROM labs WHERE id = 6;
|
||||||
id | name
|
id | name
|
||||||
|
|
|
@@ -30,67 +30,112 @@ SELECT master_modify_multiple_shards('UPDATE mx_ddl_table SET version=0.1 WHERE
 -- SET NOT NULL
 ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL;
 -- See that the changes are applied on coordinator, worker tables and shards
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
-Table "public.mx_ddl_table"
 Column | Type | Modifiers
 ---------+---------+--------------------
 key | integer | not null
 value | integer |
 version | integer | not null default 1
-Indexes:
+(3 rows)
-"mx_ddl_table_pkey" PRIMARY KEY, btree (key)
-"ddl_test_concurrent_index" btree (value)
+\d ddl_test*_index
-"ddl_test_index" btree (value)
+Index "public.ddl_test_concurrent_index"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table"

+Index "public.ddl_test_index"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table"

 \c - - - :worker_1_port
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
-Table "public.mx_ddl_table"
 Column | Type | Modifiers
 ---------+---------+--------------------
 key | integer | not null
 value | integer |
 version | integer | not null default 1
-Indexes:
+(3 rows)
-"mx_ddl_table_pkey" PRIMARY KEY, btree (key)
-"ddl_test_concurrent_index" btree (value)
-"ddl_test_index" btree (value)

-\d mx_ddl_table_1220088
+\d ddl_test*_index
-Table "public.mx_ddl_table_1220088"
+Index "public.ddl_test_concurrent_index"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table"

+Index "public.ddl_test_index"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table"

+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass;
 Column | Type | Modifiers
 ---------+---------+--------------------
 key | integer | not null
 value | integer |
 version | integer | not null default 1
-Indexes:
+(3 rows)
-"mx_ddl_table_pkey_1220088" PRIMARY KEY, btree (key)
-"ddl_test_concurrent_index_1220088" btree (value)
+\d ddl_test*_index_1220088
-"ddl_test_index_1220088" btree (value)
+Index "public.ddl_test_concurrent_index_1220088"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table_1220088"

+Index "public.ddl_test_index_1220088"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table_1220088"

 \c - - - :worker_2_port
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
-Table "public.mx_ddl_table"
 Column | Type | Modifiers
 ---------+---------+--------------------
 key | integer | not null
 value | integer |
 version | integer | not null default 1
-Indexes:
+(3 rows)
-"mx_ddl_table_pkey" PRIMARY KEY, btree (key)
-"ddl_test_concurrent_index" btree (value)
-"ddl_test_index" btree (value)

-\d mx_ddl_table_1220089
+\d ddl_test*_index
-Table "public.mx_ddl_table_1220089"
+Index "public.ddl_test_concurrent_index"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table"

+Index "public.ddl_test_index"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table"

+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass;
 Column | Type | Modifiers
 ---------+---------+--------------------
 key | integer | not null
 value | integer |
 version | integer | not null default 1
-Indexes:
+(3 rows)
-"mx_ddl_table_pkey_1220089" PRIMARY KEY, btree (key)
-"ddl_test_concurrent_index_1220089" btree (value)
+\d ddl_test*_index_1220089
-"ddl_test_index_1220089" btree (value)
+Index "public.ddl_test_concurrent_index_1220089"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table_1220089"

+Index "public.ddl_test_index_1220089"
+ Column | Type | Definition
+--------+---------+------------
+ value | integer | value
+btree, for table "public.mx_ddl_table_1220089"

 INSERT INTO mx_ddl_table VALUES (37, 78, 2);
 INSERT INTO mx_ddl_table VALUES (38, 78);
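The table_desc relation queried throughout these expected outputs is not a PostgreSQL catalog; it is a helper view the commit adds to the regression suite so the same "Column"/"Type"/"Modifiers" listing appears on every supported server version (psql 10 reworked \d output, splitting the old Modifiers column into Collation/Nullable/Default). The commit's actual definition lives in the test support files and may differ in detail; a minimal sketch over pg_attribute along these lines would yield the rows shown above:

-- a minimal sketch, assuming a view over pg_attribute is acceptable;
-- the commit's real definition may format modifiers differently, and
-- callers would typically add ORDER BY attnum
CREATE VIEW table_desc AS
SELECT a.attrelid AS relid,
       a.attname::text AS "Column",
       format_type(a.atttypid, a.atttypmod) AS "Type",
       trim(trailing FROM
            CASE WHEN a.attnotnull THEN 'not null ' ELSE '' END ||
            CASE WHEN d.adbin IS NOT NULL
                 THEN 'default ' || pg_get_expr(d.adbin, d.adrelid)
                 ELSE '' END) AS "Modifiers"
FROM pg_attribute a
LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
WHERE a.attnum > 0 AND NOT a.attisdropped;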
@@ -132,52 +177,72 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version DROP NOT NULL;
 -- DROP COLUMN
 ALTER TABLE mx_ddl_table DROP COLUMN version;
 -- See that the changes are applied on coordinator, worker tables and shards
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
-Table "public.mx_ddl_table"
 Column | Type | Modifiers
 --------+---------+-----------
 key | integer | not null
 value | integer |
-Indexes:
+(2 rows)
-"mx_ddl_table_pkey" PRIMARY KEY, btree (key)
+\di ddl_test*_index
+List of relations
+ Schema | Name | Type | Owner | Table
+--------+------+------+-------+-------
+(0 rows)

 \c - - - :worker_1_port
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
-Table "public.mx_ddl_table"
 Column | Type | Modifiers
 --------+---------+-----------
 key | integer | not null
 value | integer |
-Indexes:
+(2 rows)
-"mx_ddl_table_pkey" PRIMARY KEY, btree (key)

-\d mx_ddl_table_1220088
+\di ddl_test*_index
-Table "public.mx_ddl_table_1220088"
+List of relations
+ Schema | Name | Type | Owner | Table
+--------+------+------+-------+-------
+(0 rows)

+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass;
 Column | Type | Modifiers
 --------+---------+-----------
 key | integer | not null
 value | integer |
-Indexes:
+(2 rows)
-"mx_ddl_table_pkey_1220088" PRIMARY KEY, btree (key)
+\di ddl_test*_index_1220088
+List of relations
+ Schema | Name | Type | Owner | Table
+--------+------+------+-------+-------
+(0 rows)

 \c - - - :worker_2_port
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
-Table "public.mx_ddl_table"
 Column | Type | Modifiers
 --------+---------+-----------
 key | integer | not null
 value | integer |
-Indexes:
+(2 rows)
-"mx_ddl_table_pkey" PRIMARY KEY, btree (key)

-\d mx_ddl_table_1220089
+\di ddl_test*_index
-Table "public.mx_ddl_table_1220089"
+List of relations
+ Schema | Name | Type | Owner | Table
+--------+------+------+-------+-------
+(0 rows)

+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass;
 Column | Type | Modifiers
 --------+---------+-----------
 key | integer | not null
 value | integer |
-Indexes:
+(2 rows)
-"mx_ddl_table_pkey_1220089" PRIMARY KEY, btree (key)
+\di ddl_test*_index_1220089
+List of relations
+ Schema | Name | Type | Owner | Table
+--------+------+------+-------+-------
+(0 rows)

 -- Show that DDL commands are done within a two-phase commit transaction
 \c - - - :master_port
@@ -43,15 +43,26 @@ SELECT count(*) FROM pg_dist_transaction;
 (1 row)

 \c - - - :worker_1_port
-\d distributed_mx_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass;
-Table "public.distributed_mx_table"
 Column | Type | Modifiers
 --------+-------+-----------
 key | text | not null
 value | jsonb |
-Indexes:
+(2 rows)
-"distributed_mx_table_pkey" PRIMARY KEY, btree (key)
-"distributed_mx_table_value_idx" gin (value)
+\d distributed_mx_table_pkey
+Index "public.distributed_mx_table_pkey"
+ Column | Type | Definition
+--------+------+------------
+ key | text | key
+primary key, btree, for table "public.distributed_mx_table"

+\d distributed_mx_table_value_idx
+Index "public.distributed_mx_table_value_idx"
+ Column | Type | Definition
+--------+------+------------
+ value | text | value
+gin, for table "public.distributed_mx_table"

 SELECT repmodel FROM pg_dist_partition
 WHERE logicalrelid = 'distributed_mx_table'::regclass;
@@ -68,15 +79,26 @@ WHERE logicalrelid = 'distributed_mx_table'::regclass;
 (1 row)

 \c - - - :worker_2_port
-\d distributed_mx_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass;
-Table "public.distributed_mx_table"
 Column | Type | Modifiers
 --------+-------+-----------
 key | text | not null
 value | jsonb |
-Indexes:
+(2 rows)
-"distributed_mx_table_pkey" PRIMARY KEY, btree (key)
-"distributed_mx_table_value_idx" gin (value)
+\d distributed_mx_table_pkey
+Index "public.distributed_mx_table_pkey"
+ Column | Type | Definition
+--------+------+------------
+ key | text | key
+primary key, btree, for table "public.distributed_mx_table"

+\d distributed_mx_table_value_idx
+Index "public.distributed_mx_table_value_idx"
+ Column | Type | Definition
+--------+------+------------
+ value | text | value
+gin, for table "public.distributed_mx_table"

 SELECT repmodel FROM pg_dist_partition
 WHERE logicalrelid = 'distributed_mx_table'::regclass;
@@ -20,18 +20,13 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345
 (1 row)

 \c - - - :worker_1_port
-\d too_long_*
+\dt too_long_*
-Table "public.too_long_12345678901234567890123456789012345678_e0119164_225000"
+List of relations
-Column | Type | Modifiers
+Schema | Name | Type | Owner
---------+---------+-----------
+--------+-----------------------------------------------------------------+-------+----------
-col1 | integer | not null
+public | too_long_12345678901234567890123456789012345678_e0119164_225000 | table | postgres
-col2 | integer | not null
+public | too_long_12345678901234567890123456789012345678_e0119164_225001 | table | postgres
+(2 rows)
-Table "public.too_long_12345678901234567890123456789012345678_e0119164_225001"
-Column | Type | Modifiers
---------+---------+-----------
-col1 | integer | not null
-col2 | integer | not null

 \c - - - :master_port
 -- Verify that the UDF works and rejects bad arguments.
@@ -83,8 +78,7 @@ ERROR: cannot create constraint without a name on a distributed table
 ALTER TABLE name_lengths ADD CHECK (date_col_12345678901234567890123456789012345678901234567890 > '2014-01-01'::date);
 ERROR: cannot create constraint without a name on a distributed table
 \c - - - :worker_1_port
-\d name_lengths_*
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass;
-Table "public.name_lengths_225002"
 Column | Type | Modifiers
 --------------------------------------------------------------+------------------+-----------
 col1 | integer | not null
@@ -92,19 +86,7 @@ ERROR: cannot create constraint without a name on a distributed table
 float_col_12345678901234567890123456789012345678901234567890 | double precision |
 date_col_12345678901234567890123456789012345678901234567890 | date |
 int_col_12345678901234567890123456789012345678901234567890 | integer | default 1
-Indexes:
+(5 rows)
-"constraint_a_225002" UNIQUE CONSTRAINT, btree (col1)

-Table "public.name_lengths_225003"
-Column | Type | Modifiers
---------------------------------------------------------------+------------------+-----------
-col1 | integer | not null
-col2 | integer | not null
-float_col_12345678901234567890123456789012345678901234567890 | double precision |
-date_col_12345678901234567890123456789012345678901234567890 | date |
-int_col_12345678901234567890123456789012345678901234567890 | integer | default 1
-Indexes:
-"constraint_a_225003" UNIQUE CONSTRAINT, btree (col1)

 \c - - - :master_port
 -- Placeholders for unsupported add constraints with EXPLICIT names that are too long
@@ -118,7 +100,12 @@ ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_123456789012345678901234567890
 NOTICE: using one-phase commit for distributed DDL commands
 HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 \c - - - :worker_1_port
-\d nl_*
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass;
+ Constraint | Definition
+-----------------------------------------------------------------+-------------------------------------------------------------------------------------------
+ nl_checky_1234567890123456789012345678901234567_b16df46d_225002 | CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '01-01-2014'::date)
+(1 row)

 \c - - - :master_port
 -- Placeholders for RENAME operations
 ALTER TABLE name_lengths RENAME TO name_len_12345678901234567890123456789012345678901234567890;
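table_checks, like table_desc, is a test-suite helper rather than a system catalog: it exposes a relation's CHECK constraints as "Constraint"/"Definition" pairs independent of the psql version. A minimal sketch over pg_constraint along these lines would produce the row shown above (the commit's actual definition may differ):

-- a minimal sketch, assuming only CHECK constraints (contype = 'c') matter;
-- the second argument to pg_get_constraintdef enables pretty-printing,
-- which drops the extra parentheses around the check expression
CREATE VIEW table_checks AS
SELECT c.conrelid AS relid,
       c.conname::text AS "Constraint",
       pg_get_constraintdef(c.oid, true) AS "Definition"
FROM pg_constraint c
WHERE c.contype = 'c';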
@@ -203,22 +190,18 @@ CREATE TABLE sneaky_name_lengths (
 col2 integer not null,
 CONSTRAINT checky_12345678901234567890123456789012345678901234567890 CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100)
 );
-\d sneaky_name_lengths*
+\di public.sneaky_name_lengths*
-Table "public.sneaky_name_lengths"
+List of relations
-Column | Type | Modifiers
+Schema | Name | Type | Owner | Table
-----------------------------------------------------------------+---------+-----------
+--------+-----------------------------------------------------------------+-------+----------+---------------------
-int_col_123456789012345678901234567890123456789012345678901234 | integer | not null
+public | sneaky_name_lengths_int_col_1234567890123456789012345678901_key | index | postgres | sneaky_name_lengths
-col2 | integer | not null
+(1 row)
-Indexes:
-"sneaky_name_lengths_int_col_1234567890123456789012345678901_key" UNIQUE CONSTRAINT, btree (int_col_123456789012345678901234567890123456789012345678901234)
-Check constraints:
-"checky_12345678901234567890123456789012345678901234567890" CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100)

-Index "public.sneaky_name_lengths_int_col_1234567890123456789012345678901_key"
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths'::regclass;
-Column | Type | Definition
+ Constraint | Definition
-----------------------------------------------------------------+---------+----------------------------------------------------------------
+-----------------------------------------------------------+------------------------------------------------------------------------------
-int_col_123456789012345678901234567890123456789012345678901234 | integer | int_col_123456789012345678901234567890123456789012345678901234
+ checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100)
-unique, btree, for table "public.sneaky_name_lengths"
+(1 row)

 SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash');
 master_create_distributed_table
@@ -233,38 +216,18 @@ SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
 (1 row)

 \c - - - :worker_1_port
-\d sneaky_name_lengths*
+\di public.sneaky*225006
-Table "public.sneaky_name_lengths_225006"
+List of relations
-Column | Type | Modifiers
+Schema | Name | Type | Owner | Table
-----------------------------------------------------------------+---------+-----------
+--------+-----------------------------------------------------------------+-------+----------+----------------------------
-int_col_123456789012345678901234567890123456789012345678901234 | integer | not null
+public | sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225006 | index | postgres | sneaky_name_lengths_225006
-col2 | integer | not null
+(1 row)
-Indexes:
-"sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225006" UNIQUE CONSTRAINT, btree (int_col_123456789012345678901234567890123456789012345678901234)
-Check constraints:
-"checky_12345678901234567890123456789012345678901234567890" CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100)

-Table "public.sneaky_name_lengths_225007"
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass;
-Column | Type | Modifiers
+ Constraint | Definition
-----------------------------------------------------------------+---------+-----------
+-----------------------------------------------------------+------------------------------------------------------------------------------
-int_col_123456789012345678901234567890123456789012345678901234 | integer | not null
+ checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100)
-col2 | integer | not null
+(1 row)
-Indexes:
-"sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225007" UNIQUE CONSTRAINT, btree (int_col_123456789012345678901234567890123456789012345678901234)
-Check constraints:
-"checky_12345678901234567890123456789012345678901234567890" CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100)

-Index "public.sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225006"
-Column | Type | Definition
-----------------------------------------------------------------+---------+----------------------------------------------------------------
-int_col_123456789012345678901234567890123456789012345678901234 | integer | int_col_123456789012345678901234567890123456789012345678901234
-unique, btree, for table "public.sneaky_name_lengths_225006"

-Index "public.sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225007"
-Column | Type | Definition
-----------------------------------------------------------------+---------+----------------------------------------------------------------
-int_col_123456789012345678901234567890123456789012345678901234 | integer | int_col_123456789012345678901234567890123456789012345678901234
-unique, btree, for table "public.sneaky_name_lengths_225007"

 \c - - - :master_port
 DROP TABLE sneaky_name_lengths CASCADE;
@@ -288,24 +251,12 @@ SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
 (1 row)

 \c - - - :worker_1_port
-\d sneaky_name_lengths*
+\di unique*225008
-Table "public.sneaky_name_lengths_225008"
+List of relations
-Column | Type | Modifiers
+Schema | Name | Type | Owner | Table
-------------------------------------------------------------+---------+-----------
+--------+-----------------------------------------------------------------+-------+----------+----------------------------
-col1 | integer | not null
+public | unique_1234567890123456789012345678901234567890_a5986f27_225008 | index | postgres | sneaky_name_lengths_225008
-col2 | integer | not null
+(1 row)
-int_col_12345678901234567890123456789012345678901234567890 | integer | not null
-Indexes:
-"unique_1234567890123456789012345678901234567890_a5986f27_225008" UNIQUE CONSTRAINT, btree (col1)

-Table "public.sneaky_name_lengths_225009"
-Column | Type | Modifiers
-------------------------------------------------------------+---------+-----------
-col1 | integer | not null
-col2 | integer | not null
-int_col_12345678901234567890123456789012345678901234567890 | integer | not null
-Indexes:
-"unique_1234567890123456789012345678901234567890_a5986f27_225009" UNIQUE CONSTRAINT, btree (col1)

 \c - - - :master_port
 DROP TABLE sneaky_name_lengths CASCADE;
@@ -327,18 +278,13 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345
 (1 row)

 \c - - - :worker_1_port
-\d too_long_*
+\dt *225000000000*
-Table "public.too_long_1234567890123456789012345678901_e0119164_2250000000000"
+List of relations
-Column | Type | Modifiers
+Schema | Name | Type | Owner
---------+---------+-----------
+--------+-----------------------------------------------------------------+-------+----------
-col1 | integer | not null
+public | too_long_1234567890123456789012345678901_e0119164_2250000000000 | table | postgres
-col2 | integer | not null
+public | too_long_1234567890123456789012345678901_e0119164_2250000000001 | table | postgres
+(2 rows)
-Table "public.too_long_1234567890123456789012345678901_e0119164_2250000000001"
-Column | Type | Modifiers
---------+---------+-----------
-col1 | integer | not null
-col2 | integer | not null

 \c - - - :master_port
 DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE;
@@ -359,34 +305,21 @@ SELECT master_create_worker_shards(U&'elephant_!0441!043B!043E!043D!0441!043B!04
 (1 row)

 \c - - - :worker_1_port
-\d elephant_*
+\dt public.elephant_*
-Index "public.elephant_слонслонслонсло_14d34928_2250000000002"
+List of relations
-Column | Type | Definition
+Schema | Name | Type | Owner
---------+---------+------------
+--------+-------------------------------------------------+-------+----------
-col1 | integer | col1
+public | elephant_слонслонслонсло_c8b737c2_2250000000002 | table | postgres
-primary key, btree, for table "public.elephant_слонслонслонсло_c8b737c2_2250000000002"
+public | elephant_слонслонслонсло_c8b737c2_2250000000003 | table | postgres
+(2 rows)

-Index "public.elephant_слонслонслонсло_14d34928_2250000000003"
+\di public.elephant_*
-Column | Type | Definition
+List of relations
---------+---------+------------
+Schema | Name | Type | Owner | Table
-col1 | integer | col1
+--------+-------------------------------------------------+-------+----------+-------------------------------------------------
-primary key, btree, for table "public.elephant_слонслонслонсло_c8b737c2_2250000000003"
+public | elephant_слонслонслонсло_14d34928_2250000000002 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000002
+public | elephant_слонслонслонсло_14d34928_2250000000003 | index | postgres | elephant_слонслонслонсло_c8b737c2_2250000000003
-Table "public.elephant_слонслонслонсло_c8b737c2_2250000000002"
+(2 rows)
-Column | Type | Modifiers
---------+---------+-----------
-col1 | integer | not null
-col2 | integer | not null
-Indexes:
-"elephant_слонслонслонсло_14d34928_2250000000002" PRIMARY KEY, btree (col1)

-Table "public.elephant_слонслонслонсло_c8b737c2_2250000000003"
-Column | Type | Modifiers
---------+---------+-----------
-col1 | integer | not null
-col2 | integer | not null
-Indexes:
-"elephant_слонслонслонсло_14d34928_2250000000003" PRIMARY KEY, btree (col1)

 \c - - - :master_port
 -- Clean up.
@@ -4,6 +4,14 @@
 -- This test checks that we can handle null min/max values in shard statistics
 -- and that we don't partition or join prune shards that have null values.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 10
+(1 row)

 SET client_min_messages TO DEBUG2;
 SET citus.explain_all_tasks TO on;
 -- to avoid differing explain output - executor doesn't matter,
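The substring(:'server_version', '\d+') idiom added above works because the two-argument form of substring() does POSIX regex matching and returns the first match, i.e. the leading digit run. That yields the major version under both the old two-part numbering (9.x) and the single-number scheme introduced in PostgreSQL 10, which is why the suite can branch its expected files on it. Illustrative calls (the literals are examples, not test output):

SELECT substring('9.6.3', '\d+');  -- returns 9
SELECT substring('10.1', '\d+');   -- returns 10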
@@ -73,9 +81,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
@@ -87,44 +95,44 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
--> Task
-Node: host=localhost port=57638 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
 -> Index Only Scan using orders_pkey_290008 on orders_290008 orders
+-> Task
+Node: host=localhost port=57638 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
 -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 (60 rows)

 -- Now set the minimum value for a shard to null. Then check that we don't apply
@@ -167,9 +175,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
@@ -181,51 +189,51 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
--> Task
-Node: host=localhost port=57638 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
--> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
--> Task
-Node: host=localhost port=57637 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
--> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
--> Task
-Node: host=localhost port=57638 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
 -> Index Only Scan using orders_pkey_290008 on orders_290008 orders
--> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
--> Task
-Node: host=localhost port=57637 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
--> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
 -> Index Only Scan using orders_pkey_290009 on orders_290009 orders
--> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
 -> Index Only Scan using orders_pkey_290009 on orders_290009 orders
+-> Task
+Node: host=localhost port=57638 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
+-> Task
+Node: host=localhost port=57637 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
+-> Task
+Node: host=localhost port=57638 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+-> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
+-> Task
+Node: host=localhost port=57637 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
 -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 (67 rows)

 -- Next, set the maximum value for another shard to null. Then check that we
@@ -271,9 +279,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
@@ -285,58 +293,58 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
--> Task
-Node: host=localhost port=57638 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
 -> Index Only Scan using orders_pkey_290008 on orders_290008 orders
+-> Task
+Node: host=localhost port=57638 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
 -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 (74 rows)

 -- Last, set the minimum value to 0 and check that we don't treat it as null. We
@@ -379,9 +387,9 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
@@ -393,51 +401,51 @@ DEBUG: join prunable for intervals [13473,14947] and [1,5986]
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
--> Task
-Node: host=localhost port=57637 dbname=regression
--> Aggregate
--> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
 -> Index Only Scan using orders_pkey_290008 on orders_290008 orders
+-> Task
+Node: host=localhost port=57637 dbname=regression
+-> Aggregate
+-> Merge Join
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
 -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
+-> Index Only Scan using orders_pkey_290008 on orders_290008 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57638 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Task
 Node: host=localhost port=57637 dbname=regression
 -> Aggregate
 -> Merge Join
-Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+-> Index Only Scan using orders_pkey_290009 on orders_290009 orders
 (67 rows)

 -- Set minimum and maximum values for two shards back to their original values
@ -0,0 +1,454 @@
|
||||||
|
--
|
||||||
|
-- MULTI_NULL_MINMAX_VALUE_PRUNING
|
||||||
|
--
|
||||||
|
-- This test checks that we can handle null min/max values in shard statistics
|
||||||
|
-- and that we don't partition or join prune shards that have null values.
|
||||||
|
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000;
|
||||||
|
-- print major version to make version-specific tests clear
|
||||||
|
SHOW server_version \gset
|
||||||
|
SELECT substring(:'server_version', '\d+') AS major_version;
|
||||||
|
major_version
|
||||||
|
---------------
|
||||||
|
9
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SET client_min_messages TO DEBUG2;
|
||||||
|
SET citus.explain_all_tasks TO on;
|
||||||
|
-- to avoid differing explain output - executor doesn't matter,
|
||||||
|
-- because were testing pruning here.
|
||||||
|
SET citus.task_executor_type TO 'real-time';
|
||||||
|
-- Change configuration to treat lineitem and orders tables as large
|
||||||
|
SET citus.large_table_shard_count TO 2;
|
||||||
|
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
|
||||||
|
shardminvalue | shardmaxvalue
|
||||||
|
---------------+---------------
|
||||||
|
1 | 1509
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
|
||||||
|
shardminvalue | shardmaxvalue
|
||||||
|
---------------+---------------
|
||||||
|
1509 | 2951
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- Check that partition and join pruning works when min/max values exist
|
||||||
|
-- Adding l_orderkey = 1 to make the query not router executable
|
||||||
|
EXPLAIN (COSTS FALSE)
|
||||||
|
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
|
||||||
|
QUERY PLAN
|
||||||
|
-----------------------------------------------------------------------
|
||||||
|
Custom Scan (Citus Real-Time)
|
||||||
|
Task Count: 2
|
||||||
|
Tasks Shown: All
|
||||||
|
-> Task
|
||||||
|
Node: host=localhost port=57637 dbname=regression
|
||||||
|
-> Bitmap Heap Scan on lineitem_290000 lineitem
|
||||||
|
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
|
||||||
|
-> BitmapOr
|
||||||
|
-> Bitmap Index Scan on lineitem_pkey_290000
|
||||||
|
Index Cond: (l_orderkey = 9030)
|
||||||
|
-> Bitmap Index Scan on lineitem_pkey_290000
|
||||||
|
Index Cond: (l_orderkey = 1)
|
||||||
|
-> Task
|
||||||
|
Node: host=localhost port=57638 dbname=regression
|
||||||
|
-> Bitmap Heap Scan on lineitem_290004 lineitem
|
||||||
|
Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1))
|
||||||
|
-> BitmapOr
|
||||||
|
-> Bitmap Index Scan on lineitem_pkey_290004
|
||||||
|
Index Cond: (l_orderkey = 9030)
|
||||||
|
-> Bitmap Index Scan on lineitem_pkey_290004
|
||||||
|
Index Cond: (l_orderkey = 1)
|
||||||
|
(21 rows)
|
||||||
|
|
||||||
|
+EXPLAIN (COSTS FALSE)
+SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
+	WHERE l_orderkey = o_orderkey;
+DEBUG:  join prunable for intervals [1,1509] and [8997,14946]
+DEBUG:  join prunable for intervals [1509,2951] and [8997,14946]
+DEBUG:  join prunable for intervals [2951,4455] and [8997,14946]
+DEBUG:  join prunable for intervals [4480,5986] and [8997,14946]
+DEBUG:  join prunable for intervals [8997,10560] and [1,5986]
+DEBUG:  join prunable for intervals [10560,12036] and [1,5986]
+DEBUG:  join prunable for intervals [12036,13473] and [1,5986]
+DEBUG:  join prunable for intervals [13473,14947] and [1,5986]
+                                              QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ Aggregate
+   ->  Custom Scan (Citus Real-Time)
+         Task Count: 8
+         Tasks Shown: All
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+                           ->  Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+(60 rows)
+
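Each "join prunable" line names a lineitem/orders shard pair whose intervals cannot overlap; only the eight overlapping pairs become tasks. An illustrative sketch of the overlap rule (assumed semantics for explanation, not the planner's actual C implementation):

-- Pairs [amin,amax] x [bmin,bmax] survive join pruning only when they overlap.
-- Shards with NULL bounds are never prunable and would need an extra IS NULL check.
SELECT l.shardid AS lineitem_shard, o.shardid AS orders_shard
FROM pg_dist_shard l, pg_dist_shard o
WHERE l.logicalrelid = 'lineitem'::regclass
  AND o.logicalrelid = 'orders'::regclass
  AND l.shardminvalue::bigint <= o.shardmaxvalue::bigint
  AND o.shardminvalue::bigint <= l.shardmaxvalue::bigint;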
+-- Now set the minimum value for a shard to null. Then check that we don't apply
+-- partition or join pruning for the shard with null min value.
+UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
+EXPLAIN (COSTS FALSE)
+SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
+                                   QUERY PLAN
+-------------------------------------------------------------------------------
+ Custom Scan (Citus Real-Time)
+   Task Count: 2
+   Tasks Shown: All
+   ->  Task
+         Node: host=localhost port=57637 dbname=regression
+         ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+               Index Cond: (l_orderkey = 9030)
+   ->  Task
+         Node: host=localhost port=57638 dbname=regression
+         ->  Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+               Index Cond: (l_orderkey = 9030)
+(11 rows)
+
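With its minimum nulled out, shard 290000 can no longer be excluded, so it is scanned alongside 290004 even though l_orderkey = 9030 lies outside its data. Shards in this always-scanned state can be listed directly (a diagnostic sketch, not part of the test):

-- Shards with an unknown bound always survive partition and join pruning.
SELECT logicalrelid::regclass AS relation, shardid
FROM pg_dist_shard
WHERE shardminvalue IS NULL OR shardmaxvalue IS NULL;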
+EXPLAIN (COSTS FALSE)
+SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
+	WHERE l_orderkey = o_orderkey;
+DEBUG:  join prunable for intervals [1509,2951] and [8997,14946]
+DEBUG:  join prunable for intervals [2951,4455] and [8997,14946]
+DEBUG:  join prunable for intervals [4480,5986] and [8997,14946]
+DEBUG:  join prunable for intervals [8997,10560] and [1,5986]
+DEBUG:  join prunable for intervals [10560,12036] and [1,5986]
+DEBUG:  join prunable for intervals [12036,13473] and [1,5986]
+DEBUG:  join prunable for intervals [13473,14947] and [1,5986]
+                                              QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ Aggregate
+   ->  Custom Scan (Citus Real-Time)
+         Task Count: 9
+         Tasks Shown: All
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+                           ->  Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+(67 rows)
+
+-- Next, set the maximum value for another shard to null. Then check that we
+-- don't apply partition or join pruning for this other shard either.
+UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
+EXPLAIN (COSTS FALSE)
+SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
+                                   QUERY PLAN
+-------------------------------------------------------------------------------
+ Custom Scan (Citus Real-Time)
+   Task Count: 3
+   Tasks Shown: All
+   ->  Task
+         Node: host=localhost port=57637 dbname=regression
+         ->  Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+               Index Cond: (l_orderkey = 9030)
+   ->  Task
+         Node: host=localhost port=57638 dbname=regression
+         ->  Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+               Index Cond: (l_orderkey = 9030)
+   ->  Task
+         Node: host=localhost port=57637 dbname=regression
+         ->  Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+               Index Cond: (l_orderkey = 9030)
+(15 rows)
+
+EXPLAIN (COSTS FALSE)
+SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
+	WHERE l_orderkey = o_orderkey;
+DEBUG:  join prunable for intervals [2951,4455] and [8997,14946]
+DEBUG:  join prunable for intervals [4480,5986] and [8997,14946]
+DEBUG:  join prunable for intervals [8997,10560] and [1,5986]
+DEBUG:  join prunable for intervals [10560,12036] and [1,5986]
+DEBUG:  join prunable for intervals [12036,13473] and [1,5986]
+DEBUG:  join prunable for intervals [13473,14947] and [1,5986]
+                                              QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ Aggregate
+   ->  Custom Scan (Citus Real-Time)
+         Task Count: 10
+         Tasks Shown: All
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+                           ->  Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+(74 rows)
+
+-- Last, set the minimum value to 0 and check that we don't treat it as null. We
+-- should apply partition and join pruning for this shard now.
+UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
+EXPLAIN (COSTS FALSE)
+SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
+                                   QUERY PLAN
+-------------------------------------------------------------------------------
+ Custom Scan (Citus Real-Time)
+   Task Count: 2
+   Tasks Shown: All
+   ->  Task
+         Node: host=localhost port=57637 dbname=regression
+         ->  Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+               Index Cond: (l_orderkey = 9030)
+   ->  Task
+         Node: host=localhost port=57638 dbname=regression
+         ->  Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+               Index Cond: (l_orderkey = 9030)
+(11 rows)
+
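Because the bounds are text values in pg_dist_shard, '0' is an ordinary lower bound and only SQL NULL means "unknown", so pruning works again and the task count drops back to 2. A small check that makes the distinction visible (sketch only, not part of the test):

-- '0' is a usable bound; NULL disables pruning for that shard.
SELECT shardid,
       shardminvalue,
       shardminvalue IS NULL AS min_is_unknown
FROM pg_dist_shard
WHERE shardid IN (290000, 290001);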
+EXPLAIN (COSTS FALSE)
+SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
+	WHERE l_orderkey = o_orderkey;
+DEBUG:  join prunable for intervals [0,1509] and [8997,14946]
+DEBUG:  join prunable for intervals [2951,4455] and [8997,14946]
+DEBUG:  join prunable for intervals [4480,5986] and [8997,14946]
+DEBUG:  join prunable for intervals [8997,10560] and [1,5986]
+DEBUG:  join prunable for intervals [10560,12036] and [1,5986]
+DEBUG:  join prunable for intervals [12036,13473] and [1,5986]
+DEBUG:  join prunable for intervals [13473,14947] and [1,5986]
+                                              QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ Aggregate
+   ->  Custom Scan (Citus Real-Time)
+         Task Count: 9
+         Tasks Shown: All
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (lineitem.l_orderkey = orders.o_orderkey)
+                           ->  Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290008 on orders_290008 orders
+                           ->  Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem
+         ->  Task
+               Node: host=localhost port=57638 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem
+         ->  Task
+               Node: host=localhost port=57637 dbname=regression
+               ->  Aggregate
+                     ->  Merge Join
+                           Merge Cond: (orders.o_orderkey = lineitem.l_orderkey)
+                           ->  Index Only Scan using orders_pkey_290009 on orders_290009 orders
+                           ->  Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem
+(67 rows)
+
+-- Set minimum and maximum values for two shards back to their original values
+UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000;
+UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001;
+SET client_min_messages TO NOTICE;
@@ -1331,43 +1331,61 @@ ALTER TABLE reference_table_ddl DROP COLUMN value_1;
 ALTER TABLE reference_table_ddl ALTER COLUMN value_2 SET DEFAULT 25.0;
 ALTER TABLE reference_table_ddl ALTER COLUMN value_3 SET NOT NULL;
 -- see that Citus applied all DDLs to the table
-\d reference_table_ddl
-     Table "public.reference_table_ddl"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl'::regclass;
  Column  |            Type             |  Modifiers
 ---------+-----------------------------+--------------
  value_2 | double precision            | default 25.0
  value_3 | text                        | not null
  value_4 | timestamp without time zone |
  value_5 | double precision            |
-Indexes:
-    "reference_index_2" btree (value_2, value_3)
+(4 rows)
 
+\d reference_index_2
+   Index "public.reference_index_2"
+ Column  |       Type       | Definition
+---------+------------------+------------
+ value_2 | double precision | value_2
+ value_3 | text             | value_3
+btree, for table "public.reference_table_ddl"
+
 -- also to the shard placements
 \c - - - :worker_1_port
-\d reference_table_ddl*
-     Table "public.reference_table_ddl_1250019"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl_1250019'::regclass;
  Column  |            Type             |  Modifiers
 ---------+-----------------------------+--------------
  value_2 | double precision            | default 25.0
  value_3 | text                        | not null
  value_4 | timestamp without time zone |
  value_5 | double precision            |
-Indexes:
-    "reference_index_2_1250019" btree (value_2, value_3)
+(4 rows)
 
+\d reference_index_2_1250019
+   Index "public.reference_index_2_1250019"
+ Column  |       Type       | Definition
+---------+------------------+------------
+ value_2 | double precision | value_2
+ value_3 | text             | value_3
+btree, for table "public.reference_table_ddl_1250019"
+
 \c - - - :master_port
 DROP INDEX reference_index_2;
 NOTICE:  using one-phase commit for distributed DDL commands
 HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 \c - - - :worker_1_port
-\d reference_table_ddl*
-     Table "public.reference_table_ddl_1250019"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl_1250019'::regclass;
  Column  |            Type             |  Modifiers
 ---------+-----------------------------+--------------
  value_2 | double precision            | default 25.0
  value_3 | text                        | not null
  value_4 | timestamp without time zone |
  value_5 | double precision            |
+(4 rows)
 
+\di reference_index_2*
+       List of relations
+ Schema | Name | Type | Owner | Table
+--------+------+------+-------+-------
+(0 rows)
+
 \c - - - :master_port
 -- as we expect, renaming and setting WITH OIDS does not work for reference tables
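The SELECTs above replace \d because psql 10 reworked its column layout; the table_desc view they read is created by the test-helper file added later in this commit, whose definition falls outside this excerpt. A minimal sketch of a view with the same shape, reconstructed from the system catalogs rather than copied from the commit, might be:

-- Sketch (assumed, not the commit's actual definition): rebuild the pre-10
-- "Column | Type | Modifiers" layout from pg_attribute and pg_attrdef.
CREATE VIEW table_desc AS
SELECT a.attrelid AS relid,
       a.attname AS "Column",
       format_type(a.atttypid, a.atttypmod) AS "Type",
       concat_ws(' ',
                 CASE WHEN a.attnotnull THEN 'not null' END,
                 CASE WHEN d.adbin IS NOT NULL
                      THEN 'default ' || pg_get_expr(d.adbin, d.adrelid) END) AS "Modifiers"
FROM pg_attribute a
LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
WHERE a.attnum > 0 AND NOT a.attisdropped;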
@@ -631,12 +631,12 @@ WHERE
 
 \c - - - :master_port
 -- verify table structure is changed
-\d remove_node_reference_table
-Table "public.remove_node_reference_table"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_node_reference_table'::regclass;
  Column  |  Type   | Modifiers
 ---------+---------+-----------
  column1 | integer |
  column2 | integer |
+(2 rows)
 
 -- re-add the node for next tests
 SELECT master_add_node('localhost', :worker_2_port);
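After the master_add_node call the coordinator should again know both workers. A quick verification sketch (not part of the test file) reads the node metadata directly:

-- List the worker nodes currently registered with the coordinator.
SELECT nodename, nodeport
FROM pg_dist_node
ORDER BY nodeport;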
@@ -586,8 +586,7 @@ ALTER TABLE test_schema_support.nation_hash ADD COLUMN new_col INT;
 NOTICE:  using one-phase commit for distributed DDL commands
 HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 -- verify column is added
-\d test_schema_support.nation_hash;
-Table "test_schema_support.nation_hash"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
    Column    |          Type          | Modifiers
 -------------+------------------------+-----------
  n_nationkey | integer                | not null
@@ -595,10 +594,10 @@ HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_sh
  n_regionkey | integer                | not null
  n_comment   | character varying(152) |
  new_col     | integer                |
+(5 rows)
 
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
-Table "test_schema_support.nation_hash_1190003"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
    Column    |          Type          | Modifiers
 -------------+------------------------+-----------
  n_nationkey | integer                | not null
@@ -606,6 +605,7 @@ HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_sh
  n_regionkey | integer                | not null
  n_comment   | character varying(152) |
  new_col     | integer                |
+(5 rows)
 
 \c - - - :master_port
 ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS non_existent_column;
@@ -614,24 +614,24 @@ NOTICE:  using one-phase commit for distributed DDL commands
 HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS new_col;
 -- verify column is dropped
-\d test_schema_support.nation_hash;
-Table "test_schema_support.nation_hash"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
    Column    |          Type          | Modifiers
 -------------+------------------------+-----------
  n_nationkey | integer                | not null
 n_name      | character(25)          | not null
  n_regionkey | integer                | not null
  n_comment   | character varying(152) |
+(4 rows)
 
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
-Table "test_schema_support.nation_hash_1190003"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
    Column    |          Type          | Modifiers
 -------------+------------------------+-----------
  n_nationkey | integer                | not null
 n_name      | character(25)          | not null
  n_regionkey | integer                | not null
  n_comment   | character varying(152) |
+(4 rows)
 
 \c - - - :master_port
 --test with search_path is set
@@ -640,8 +640,7 @@ ALTER TABLE nation_hash ADD COLUMN new_col INT;
 NOTICE:  using one-phase commit for distributed DDL commands
 HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 -- verify column is added
-\d test_schema_support.nation_hash;
-Table "test_schema_support.nation_hash"
+SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
    Column    |          Type          | Modifiers
 -------------+------------------------+-----------
  n_nationkey | integer                | not null
@@ -649,10 +648,10 @@ HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_sh
  n_regionkey | integer                | not null
  n_comment   | character varying(152) |
  new_col     | integer                |
+(5 rows)
 
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
-Table "test_schema_support.nation_hash_1190003"
+SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
    Column    |          Type          | Modifiers
 -------------+------------------------+-----------
  n_nationkey | integer                | not null
@@ -660,6 +659,7 @@ HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_sh
  n_regionkey | integer                | not null
  n_comment   | character varying(152) |
  new_col     | integer                |
+(5 rows)
 
 \c - - - :master_port
 SET search_path TO test_schema_support;
@@ -669,24 +669,24 @@ NOTICE:  using one-phase commit for distributed DDL commands
 HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 ALTER TABLE nation_hash DROP COLUMN IF EXISTS new_col;
 -- verify column is dropped
-\d test_schema_support.nation_hash;
-Table "test_schema_support.nation_hash"
+SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
    Column    |          Type          | Modifiers
 -------------+------------------------+-----------
  n_nationkey | integer                | not null
 n_name      | character(25)          | not null
  n_regionkey | integer                | not null
  n_comment   | character varying(152) |
+(4 rows)
 
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
-Table "test_schema_support.nation_hash_1190003"
+SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
    Column    |          Type          | Modifiers
 -------------+------------------------+-----------
  n_nationkey | integer                | not null
 n_name      | character(25)          | not null
  n_regionkey | integer                | not null
  n_comment   | character varying(152) |
+(4 rows)
 
 \c - - - :master_port
 -- test CREATE/DROP INDEX with schemas
@@ -696,28 +696,20 @@ CREATE INDEX index1 ON test_schema_support.nation_hash(n_name);
 NOTICE:  using one-phase commit for distributed DDL commands
 HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 --verify INDEX is created
-\d test_schema_support.nation_hash;
-Table "test_schema_support.nation_hash"
-   Column    |          Type          | Modifiers
--------------+------------------------+-----------
- n_nationkey | integer                | not null
- n_name      | character(25)          | not null
- n_regionkey | integer                | not null
- n_comment   | character varying(152) |
-Indexes:
-    "index1" btree (n_name)
+\d test_schema_support.index1
+Index "test_schema_support.index1"
+ Column |     Type      | Definition
+--------+---------------+------------
+ n_name | character(25) | n_name
+btree, for table "test_schema_support.nation_hash"
 
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
-Table "test_schema_support.nation_hash_1190003"
-   Column    |          Type          | Modifiers
--------------+------------------------+-----------
- n_nationkey | integer                | not null
- n_name      | character(25)          | not null
- n_regionkey | integer                | not null
- n_comment   | character varying(152) |
-Indexes:
-    "index1_1190003" btree (n_name)
+\d test_schema_support.index1_1190003
+Index "test_schema_support.index1_1190003"
+ Column |     Type      | Definition
+--------+---------------+------------
+ n_name | character(25) | n_name
+btree, for table "test_schema_support.nation_hash_1190003"
 
 \c - - - :master_port
 -- DROP index
@@ -725,25 +717,9 @@ DROP INDEX test_schema_support.index1;
 NOTICE:  using one-phase commit for distributed DDL commands
 HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 --verify INDEX is dropped
-\d test_schema_support.nation_hash;
-Table "test_schema_support.nation_hash"
-   Column    |          Type          | Modifiers
--------------+------------------------+-----------
- n_nationkey | integer                | not null
- n_name      | character(25)          | not null
- n_regionkey | integer                | not null
- n_comment   | character varying(152) |
+\d test_schema_support.index1
 
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
-Table "test_schema_support.nation_hash_1190003"
-   Column    |          Type          | Modifiers
--------------+------------------------+-----------
- n_nationkey | integer                | not null
- n_name      | character(25)          | not null
- n_regionkey | integer                | not null
- n_comment   | character varying(152) |
+\d test_schema_support.index1_1190003
 
 \c - - - :master_port
 --test with search_path is set
 SET search_path TO test_schema_support;
@@ -752,28 +728,20 @@ CREATE INDEX index1 ON nation_hash(n_name);
 NOTICE:  using one-phase commit for distributed DDL commands
 HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 --verify INDEX is created
-\d test_schema_support.nation_hash;
-Table "test_schema_support.nation_hash"
-   Column    |          Type          | Modifiers
--------------+------------------------+-----------
- n_nationkey | integer                | not null
- n_name      | character(25)          | not null
- n_regionkey | integer                | not null
- n_comment   | character varying(152) |
-Indexes:
-    "index1" btree (n_name)
+\d test_schema_support.index1
+Index "test_schema_support.index1"
+ Column |     Type      | Definition
+--------+---------------+------------
+ n_name | character(25) | n_name
+btree, for table "test_schema_support.nation_hash"
 
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
-Table "test_schema_support.nation_hash_1190003"
-   Column    |          Type          | Modifiers
--------------+------------------------+-----------
- n_nationkey | integer                | not null
- n_name      | character(25)          | not null
- n_regionkey | integer                | not null
- n_comment   | character varying(152) |
-Indexes:
-    "index1_1190003" btree (n_name)
+\d test_schema_support.index1_1190003
+Index "test_schema_support.index1_1190003"
+ Column |     Type      | Definition
+--------+---------------+------------
+ n_name | character(25) | n_name
+btree, for table "test_schema_support.nation_hash_1190003"
 
 \c - - - :master_port
 -- DROP index
@@ -782,25 +750,9 @@ DROP INDEX index1;
 NOTICE:  using one-phase commit for distributed DDL commands
 HINT:  You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 --verify INDEX is dropped
-\d test_schema_support.nation_hash;
-Table "test_schema_support.nation_hash"
-   Column    |          Type          | Modifiers
--------------+------------------------+-----------
- n_nationkey | integer                | not null
- n_name      | character(25)          | not null
- n_regionkey | integer                | not null
- n_comment   | character varying(152) |
+\d test_schema_support.index1
 
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
-Table "test_schema_support.nation_hash_1190003"
-   Column    |          Type          | Modifiers
--------------+------------------------+-----------
- n_nationkey | integer                | not null
- n_name      | character(25)          | not null
- n_regionkey | integer                | not null
- n_comment   | character varying(152) |
+\d test_schema_support.index1_1190003
 
 \c - - - :master_port
 -- test master_copy_shard_placement with schemas
 SET search_path TO public;
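These hunks sidestep the PostgreSQL 10 change to \d's Indexes footer by describing the index itself instead of the table. A version-independent alternative, shown here only as a sketch, is to read pg_indexes:

-- Sketch: list indexes on the distributed table without relying on \d output.
SELECT indexname, indexdef
FROM pg_indexes
WHERE schemaname = 'test_schema_support'
  AND tablename = 'nation_hash';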
@@ -2,6 +2,14 @@
 --
 -- MULTI_TASK_ASSIGNMENT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000;
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+ major_version
+---------------
+ 10
+(1 row)
+
 SET citus.explain_distributed_queries TO off;
 -- Check that our policies for assigning tasks to worker nodes run as expected.
 -- To test this, we first create a shell table, and then manually insert shard
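The added banner records which server produced the expected output. substring(text, pattern) returns the first POSIX-regex match, so '\d+' yields the major version under both the old two-part and the new single-number scheme:

SELECT substring('9.6.3', '\d+') AS old_style,  -- returns 9
       substring('10.1', '\d+') AS new_style;   -- returns 10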
@@ -46,19 +54,12 @@ BEGIN;
 -- the following log messages print node name and port numbers; and node numbers
 -- in regression tests depend upon PG_VERSION_NUM.
 SET client_min_messages TO DEBUG3;
-DEBUG:  CommitTransactionCommand
 -- First test the default greedy task assignment policy
 SET citus.task_assignment_policy TO 'greedy';
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
-DEBUG:  CommitTransactionCommand
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
 DEBUG:  assigned task 6 to node localhost:57637
 DEBUG:  assigned task 2 to node localhost:57638
 DEBUG:  assigned task 4 to node localhost:57637
-DEBUG:  CommitTransactionCommand
                                QUERY PLAN
 -----------------------------------------------------------------------
  Aggregate  (cost=0.00..0.00 rows=0 width=0)
@@ -67,12 +68,9 @@ DEBUG:  CommitTransactionCommand
 (3 rows)
 
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
 DEBUG:  assigned task 6 to node localhost:57637
 DEBUG:  assigned task 2 to node localhost:57638
 DEBUG:  assigned task 4 to node localhost:57637
-DEBUG:  CommitTransactionCommand
                                QUERY PLAN
 -----------------------------------------------------------------------
  Aggregate  (cost=0.00..0.00 rows=0 width=0)
@@ -82,16 +80,10 @@ DEBUG:  CommitTransactionCommand
 
 -- Next test the first-replica task assignment policy
 SET citus.task_assignment_policy TO 'first-replica';
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
-DEBUG:  CommitTransactionCommand
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
 DEBUG:  assigned task 6 to node localhost:57637
 DEBUG:  assigned task 4 to node localhost:57637
 DEBUG:  assigned task 2 to node localhost:57638
-DEBUG:  CommitTransactionCommand
                                QUERY PLAN
 -----------------------------------------------------------------------
  Aggregate  (cost=0.00..0.00 rows=0 width=0)
@@ -100,12 +92,9 @@ DEBUG:  CommitTransactionCommand
 (3 rows)
 
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
 DEBUG:  assigned task 6 to node localhost:57637
 DEBUG:  assigned task 4 to node localhost:57637
 DEBUG:  assigned task 2 to node localhost:57638
-DEBUG:  CommitTransactionCommand
                                QUERY PLAN
 -----------------------------------------------------------------------
  Aggregate  (cost=0.00..0.00 rows=0 width=0)
@@ -115,16 +104,10 @@ DEBUG:  CommitTransactionCommand
 
 -- Finally test the round-robin task assignment policy
 SET citus.task_assignment_policy TO 'round-robin';
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
-DEBUG:  CommitTransactionCommand
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
 DEBUG:  assigned task 6 to node localhost:57638
 DEBUG:  assigned task 4 to node localhost:57638
 DEBUG:  assigned task 2 to node localhost:57637
-DEBUG:  CommitTransactionCommand
                                QUERY PLAN
 -----------------------------------------------------------------------
  Aggregate  (cost=0.00..0.00 rows=0 width=0)
@@ -133,12 +116,9 @@ DEBUG:  CommitTransactionCommand
 (3 rows)
 
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
 DEBUG:  assigned task 6 to node localhost:57637
 DEBUG:  assigned task 4 to node localhost:57637
 DEBUG:  assigned task 2 to node localhost:57638
-DEBUG:  CommitTransactionCommand
                                QUERY PLAN
 -----------------------------------------------------------------------
  Aggregate  (cost=0.00..0.00 rows=0 width=0)
@@ -147,12 +127,9 @@ DEBUG:  CommitTransactionCommand
 (3 rows)
 
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
 DEBUG:  assigned task 6 to node localhost:57638
 DEBUG:  assigned task 4 to node localhost:57638
 DEBUG:  assigned task 2 to node localhost:57637
-DEBUG:  CommitTransactionCommand
                                QUERY PLAN
 -----------------------------------------------------------------------
  Aggregate  (cost=0.00..0.00 rows=0 width=0)
@@ -161,10 +138,5 @@ DEBUG:  CommitTransactionCommand
 (3 rows)
 
 RESET citus.task_assignment_policy;
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
-DEBUG:  CommitTransactionCommand
 RESET client_min_messages;
-DEBUG:  StartTransactionCommand
-DEBUG:  ProcessUtility
 COMMIT;
@ -0,0 +1,178 @@
|
||||||
|
--
|
||||||
|
-- MULTI_TASK_ASSIGNMENT
|
||||||
|
--
|
||||||
|
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000;
|
||||||
|
-- print major version to make version-specific tests clear
|
||||||
|
SHOW server_version \gset
|
||||||
|
SELECT substring(:'server_version', '\d+') AS major_version;
|
||||||
|
major_version
|
||||||
|
---------------
|
||||||
|
9
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
SET citus.explain_distributed_queries TO off;
|
||||||
|
-- Check that our policies for assigning tasks to worker nodes run as expected.
|
||||||
|
-- To test this, we first create a shell table, and then manually insert shard
|
||||||
|
-- and shard placement data into system catalogs. We next run Explain command,
|
||||||
|
-- and check that tasks are assigned to worker nodes as expected.
|
||||||
|
CREATE TABLE task_assignment_test_table (test_id integer);
|
||||||
|
SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append');
|
||||||
|
master_create_distributed_table
|
||||||
|
---------------------------------
|
||||||
|
|
||||||
|
(1 row)
|
||||||
|
|
||||||
|
-- Create logical shards with shardids 200, 201, and 202
|
||||||
|
INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue)
|
||||||
|
SELECT pg_class.oid, series.index, 'r', 1, 1000
|
||||||
|
FROM pg_class, generate_series(200, 202) AS series(index)
|
||||||
|
WHERE pg_class.relname = 'task_assignment_test_table';
|
||||||
|
-- Create shard placements for shard 200 and 201
|
||||||
|
INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport)
|
||||||
|
SELECT 200, 1, 1, nodename, nodeport
|
||||||
|
FROM pg_dist_shard_placement
|
||||||
|
GROUP BY nodename, nodeport
|
||||||
|
ORDER BY nodename, nodeport ASC
|
||||||
|
LIMIT 2;
|
||||||
|
INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport)
|
||||||
|
SELECT 201, 1, 1, nodename, nodeport
|
||||||
|
FROM pg_dist_shard_placement
|
||||||
|
GROUP BY nodename, nodeport
|
||||||
|
ORDER BY nodename, nodeport ASC
|
||||||
|
LIMIT 2;
|
||||||
|
-- Create shard placements for shard 202
|
||||||
|
INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport)
|
||||||
|
SELECT 202, 1, 1, nodename, nodeport
|
||||||
|
FROM pg_dist_shard_placement
|
||||||
|
GROUP BY nodename, nodeport
|
||||||
|
ORDER BY nodename, nodeport DESC
|
||||||
|
LIMIT 2;
|
||||||
|
-- Start transaction block to avoid auto commits. This avoids additional debug
|
||||||
|
-- messages from getting printed at real transaction starts and commits.
|
||||||
|
BEGIN;
|
||||||
|
-- Increase log level to see which worker nodes tasks are assigned to. Note that
|
||||||
|
-- the following log messages print node name and port numbers; and node numbers
|
||||||
|
-- in regression tests depend upon PG_VERSION_NUM.
|
||||||
|
SET client_min_messages TO DEBUG3;
|
||||||
|
DEBUG: CommitTransactionCommand
|
||||||
|
-- First test the default greedy task assignment policy
|
||||||
|
SET citus.task_assignment_policy TO 'greedy';
|
||||||
|
DEBUG: StartTransactionCommand
|
||||||
|
DEBUG: ProcessUtility
|
||||||
|
DEBUG: CommitTransactionCommand
|
||||||
|
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
|
||||||
|
DEBUG: StartTransactionCommand
|
||||||
|
DEBUG: ProcessUtility
|
||||||
|
DEBUG: assigned task 6 to node localhost:57637
|
||||||
|
DEBUG: assigned task 2 to node localhost:57638
|
||||||
|
DEBUG: assigned task 4 to node localhost:57637
|
DEBUG:  CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
  -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
     explain statements for distributed queries are not enabled
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
DEBUG:  assigned task 6 to node localhost:57637
DEBUG:  assigned task 2 to node localhost:57638
DEBUG:  assigned task 4 to node localhost:57637
DEBUG:  CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
  -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
     explain statements for distributed queries are not enabled
(3 rows)

-- Next test the first-replica task assignment policy
SET citus.task_assignment_policy TO 'first-replica';
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
DEBUG:  CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
DEBUG:  assigned task 6 to node localhost:57637
DEBUG:  assigned task 4 to node localhost:57637
DEBUG:  assigned task 2 to node localhost:57638
DEBUG:  CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
  -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
     explain statements for distributed queries are not enabled
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
DEBUG:  assigned task 6 to node localhost:57637
DEBUG:  assigned task 4 to node localhost:57637
DEBUG:  assigned task 2 to node localhost:57638
DEBUG:  CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
  -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
     explain statements for distributed queries are not enabled
(3 rows)

-- Finally test the round-robin task assignment policy
SET citus.task_assignment_policy TO 'round-robin';
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
DEBUG:  CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
DEBUG:  assigned task 6 to node localhost:57638
DEBUG:  assigned task 4 to node localhost:57638
DEBUG:  assigned task 2 to node localhost:57637
DEBUG:  CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
  -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
     explain statements for distributed queries are not enabled
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
DEBUG:  assigned task 6 to node localhost:57637
DEBUG:  assigned task 4 to node localhost:57637
DEBUG:  assigned task 2 to node localhost:57638
DEBUG:  CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
  -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
     explain statements for distributed queries are not enabled
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
DEBUG:  assigned task 6 to node localhost:57638
DEBUG:  assigned task 4 to node localhost:57638
DEBUG:  assigned task 2 to node localhost:57637
DEBUG:  CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
  -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
     explain statements for distributed queries are not enabled
(3 rows)

RESET citus.task_assignment_policy;
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
DEBUG:  CommitTransactionCommand
RESET client_min_messages;
DEBUG:  StartTransactionCommand
DEBUG:  ProcessUtility
COMMIT;
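Note how the two policies differ in the DEBUG trace above: under first-replica, repeated EXPLAINs assign each task to the same node every time, while under round-robin the whole assignment flips between localhost:57637 and localhost:57638 on successive EXPLAINs. A sketch of reproducing the trace by hand (assuming the test harness's default worker ports, and a DEBUG level high enough to surface the assignment messages):

SET client_min_messages TO DEBUG3;
SET citus.task_assignment_policy TO 'round-robin';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;  -- e.g. tasks land on :57638
EXPLAIN SELECT count(*) FROM task_assignment_test_table;  -- the next round lands on :57637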
@ -0,0 +1,86 @@
-- File to create functions and helpers needed for subsequent tests
-- create a helper function to create objects on each node
CREATE FUNCTION run_command_on_master_and_workers(p_sql text)
RETURNS void LANGUAGE plpgsql AS $$
BEGIN
     EXECUTE p_sql;
     PERFORM run_command_on_workers(p_sql);
END;$$;
-- The following views are intended as alternatives to \d commands, whose
-- output changed in PostgreSQL 10. In particular, they must be used any time
-- a test wishes to print out the structure of a relation, which previously
-- was safely accomplished by a \d invocation.
SELECT run_command_on_master_and_workers(
$desc_views$
CREATE VIEW table_fkey_cols AS
SELECT rc.constraint_name AS "name",
       kcu.column_name AS "column_name",
       uc_kcu.column_name AS "refd_column_name",
       format('%I.%I', kcu.table_schema, kcu.table_name)::regclass::oid AS relid,
       format('%I.%I', uc_kcu.table_schema, uc_kcu.table_name)::regclass::oid AS refd_relid
FROM information_schema.referential_constraints rc,
     information_schema.key_column_usage kcu,
     information_schema.key_column_usage uc_kcu
WHERE rc.constraint_schema = kcu.constraint_schema AND
      rc.constraint_name = kcu.constraint_name AND
      rc.unique_constraint_schema = uc_kcu.constraint_schema AND
      rc.unique_constraint_name = uc_kcu.constraint_name;

CREATE VIEW table_fkeys AS
SELECT name AS "Constraint",
       format('FOREIGN KEY (%s) REFERENCES %s(%s)',
              string_agg(DISTINCT quote_ident(column_name), ', '),
              string_agg(DISTINCT refd_relid::regclass::text, ', '),
              string_agg(DISTINCT quote_ident(refd_column_name), ', ')) AS "Definition",
       "relid"
FROM table_fkey_cols
GROUP BY (name, relid);

CREATE VIEW table_attrs AS
SELECT c.column_name AS "name",
       c.data_type AS "type",
       CASE
           WHEN character_maximum_length IS NOT NULL THEN
               format('(%s)', character_maximum_length)
           WHEN data_type = 'numeric' AND numeric_precision IS NOT NULL THEN
               format('(%s,%s)', numeric_precision, numeric_scale)
           ELSE ''
       END AS "modifier",
       c.column_default AS "default",
       (NOT c.is_nullable::boolean) AS "notnull",
       format('%I.%I', c.table_schema, c.table_name)::regclass::oid AS "relid"
FROM information_schema.columns AS c
ORDER BY ordinal_position;

CREATE VIEW table_desc AS
SELECT "name" AS "Column",
       "type" || "modifier" AS "Type",
       rtrim((
           CASE "notnull"
               WHEN true THEN 'not null '
               ELSE ''
           END
       ) || (
           CASE WHEN "default" IS NULL THEN ''
                ELSE 'default ' || "default"
           END
       )) AS "Modifiers",
       "relid"
FROM table_attrs;

CREATE VIEW table_checks AS
SELECT cc.constraint_name AS "Constraint",
       ('CHECK ' || regexp_replace(check_clause, '^\((.*)\)$', '\1')) AS "Definition",
       format('%I.%I', ccu.table_schema, ccu.table_name)::regclass::oid AS relid
FROM information_schema.check_constraints cc,
     information_schema.constraint_column_usage ccu
WHERE cc.constraint_schema = ccu.constraint_schema AND
      cc.constraint_name = ccu.constraint_name
ORDER BY cc.constraint_name ASC;
$desc_views$
);
 run_command_on_master_and_workers
-----------------------------------

(1 row)
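With these helpers in place, a test that previously printed a relation's structure with a \d invocation queries the views instead; a minimal sketch, assuming a hypothetical distributed table named my_table:

-- structure of the relation (formerly: \d my_table)
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.my_table'::regclass;
-- its foreign keys and CHECK constraints
SELECT "Constraint", "Definition" FROM table_fkeys  WHERE relid = 'public.my_table'::regclass;
SELECT "Constraint", "Definition" FROM table_checks WHERE relid = 'public.my_table'::regclass;

Because run_command_on_master_and_workers created the views on every node, the same queries keep working after a \c to a worker.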
@ -46,34 +46,24 @@ ORDER BY
 (8 rows)

 -- verify table is not dropped
-\d transactional_drop_shards;
-Table "public.transactional_drop_shards"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_shards
+List of relations
+ Schema | Name | Type | Owner
+--------+---------------------------+-------+----------
+ public | transactional_drop_shards | table | postgres
+(1 row)

 -- verify shards are not dropped
 \c - - - :worker_1_port
-\d transactional_drop_shards_*;
-Table "public.transactional_drop_shards_1410000"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
-
-Table "public.transactional_drop_shards_1410001"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
-
-Table "public.transactional_drop_shards_1410002"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
-
-Table "public.transactional_drop_shards_1410003"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
-
+\dt transactional_drop_shards_*
+List of relations
+ Schema | Name | Type | Owner
+--------+-----------------------------------+-------+----------
+ public | transactional_drop_shards_1410000 | table | postgres
+ public | transactional_drop_shards_1410001 | table | postgres
+ public | transactional_drop_shards_1410002 | table | postgres
+ public | transactional_drop_shards_1410003 | table | postgres
+(4 rows)

 \c - - - :master_port
 -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT

@ -99,10 +89,20 @@ ORDER BY
 (0 rows)

 -- verify table is dropped
-\d transactional_drop_shards;
+\dt transactional_drop_shards
+List of relations
+ Schema | Name | Type | Owner
+--------+------+------+-------
+(0 rows)

 -- verify shards are dropped
 \c - - - :worker_1_port
-\d transactional_drop_shards_*;
+\dt transactional_drop_shards_*
+List of relations
+ Schema | Name | Type | Owner
+--------+------+------+-------
+(0 rows)

 \c - - - :master_port
 -- test master_delete_protocol in transaction, then ROLLBACK
 CREATE TABLE transactional_drop_shards(column1 int);

@ -149,11 +149,12 @@ ORDER BY

 -- verify shards are not dropped
 \c - - - :worker_1_port
-\d transactional_drop_shards_*;
-Table "public.transactional_drop_shards_1410004"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_shards_*
+List of relations
+ Schema | Name | Type | Owner
+--------+-----------------------------------+-------+----------
+ public | transactional_drop_shards_1410004 | table | postgres
+(1 row)

 \c - - - :master_port
 -- test master_delete_protocol in transaction, then COMMIT

@ -185,7 +186,12 @@ ORDER BY

 -- verify shards are dropped
 \c - - - :worker_1_port
-\d transactional_drop_shards_*;
+\dt transactional_drop_shards_*
+List of relations
+ Schema | Name | Type | Owner
+--------+------+------+-------
+(0 rows)

 \c - - - :master_port
 -- test DROP table in a transaction after insertion
 SELECT master_create_empty_shard('transactional_drop_shards');

@ -220,19 +226,21 @@ ORDER BY
 (2 rows)

 -- verify table is not dropped
-\d transactional_drop_shards;
-Table "public.transactional_drop_shards"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_shards
+List of relations
+ Schema | Name | Type | Owner
+--------+---------------------------+-------+----------
+ public | transactional_drop_shards | table | postgres
+(1 row)

 -- verify shards are not dropped
 \c - - - :worker_1_port
-\d transactional_drop_shards_*;
-Table "public.transactional_drop_shards_1410005"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_shards_*
+List of relations
+ Schema | Name | Type | Owner
+--------+-----------------------------------+-------+----------
+ public | transactional_drop_shards_1410005 | table | postgres
+(1 row)

 \c - - - :master_port
 -- test master_apply_delete_command in a transaction after insertion

@ -268,11 +276,12 @@ ORDER BY

 -- verify shards are not dropped
 \c - - - :worker_1_port
-\d transactional_drop_shards_*;
-Table "public.transactional_drop_shards_1410005"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_shards_*
+List of relations
+ Schema | Name | Type | Owner
+--------+-----------------------------------+-------+----------
+ public | transactional_drop_shards_1410005 | table | postgres
+(1 row)

 -- test DROP table with failing worker
 CREATE FUNCTION fail_drop_table() RETURNS event_trigger AS $fdt$

@ -308,19 +317,21 @@ ORDER BY
 (2 rows)

 -- verify table is not dropped
-\d transactional_drop_shards;
-Table "public.transactional_drop_shards"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_shards
+List of relations
+ Schema | Name | Type | Owner
+--------+---------------------------+-------+----------
+ public | transactional_drop_shards | table | postgres
+(1 row)

 -- verify shards are not dropped
 \c - - - :worker_1_port
-\d transactional_drop_shards_*;
-Table "public.transactional_drop_shards_1410005"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_shards_*
+List of relations
+ Schema | Name | Type | Owner
+--------+-----------------------------------+-------+----------
+ public | transactional_drop_shards_1410005 | table | postgres
+(1 row)

 \c - - - :master_port
 -- test DROP reference table with failing worker

@ -357,19 +368,21 @@ ORDER BY
 (2 rows)

 -- verify table is not dropped
-\d transactional_drop_reference;
-Table "public.transactional_drop_reference"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_reference
+List of relations
+ Schema | Name | Type | Owner
+--------+------------------------------+-------+----------
+ public | transactional_drop_reference | table | postgres
+(1 row)

 -- verify shards are not dropped
 \c - - - :worker_1_port
-\d transactional_drop_reference*;
-Table "public.transactional_drop_reference_1410006"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_reference*
+List of relations
+ Schema | Name | Type | Owner
+--------+--------------------------------------+-------+----------
+ public | transactional_drop_reference_1410006 | table | postgres
+(1 row)

 \c - - - :master_port
 -- test master_apply_delete_command table with failing worker

@ -400,11 +413,12 @@ ORDER BY

 -- verify shards are not dropped
 \c - - - :worker_1_port
-\d transactional_drop_shards_*;
-Table "public.transactional_drop_shards_1410005"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt transactional_drop_shards_*
+List of relations
+ Schema | Name | Type | Owner
+--------+-----------------------------------+-------+----------
+ public | transactional_drop_shards_1410005 | table | postgres
+(1 row)

 DROP EVENT TRIGGER fail_drop_table;
 \c - - - :master_port

@ -464,16 +478,29 @@ ORDER BY
 (16 rows)

 -- verify table is not dropped
-\d transactional_drop_serial;
-Table "public.transactional_drop_serial"
- Column | Type | Modifiers
----------+---------+-----------------------------------------------------------------------------
- column1 | integer |
- column2 | integer | not null default nextval('transactional_drop_serial_column2_seq'::regclass)
+\dt transactional_drop_serial
+List of relations
+ Schema | Name | Type | Owner
+--------+---------------------------+-------+----------
+ public | transactional_drop_serial | table | postgres
+(1 row)

 -- verify shards and sequence are not dropped
 \c - - - :worker_1_port
-\d transactional_drop_serial_1410006;
+\dt transactional_drop_serial_*
+List of relations
+ Schema | Name | Type | Owner
+--------+-----------------------------------+-------+----------
+ public | transactional_drop_serial_1410007 | table | postgres
+ public | transactional_drop_serial_1410008 | table | postgres
+ public | transactional_drop_serial_1410009 | table | postgres
+ public | transactional_drop_serial_1410010 | table | postgres
+ public | transactional_drop_serial_1410011 | table | postgres
+ public | transactional_drop_serial_1410012 | table | postgres
+ public | transactional_drop_serial_1410013 | table | postgres
+ public | transactional_drop_serial_1410014 | table | postgres
+(8 rows)

 \ds transactional_drop_serial_column2_seq
 List of relations
 Schema | Name | Type | Owner

@ -505,10 +532,20 @@ ORDER BY
 (0 rows)

 -- verify table is dropped
-\d transactional_drop_serial;
+\dt transactional_drop_serial
+List of relations
+ Schema | Name | Type | Owner
+--------+------+------+-------
+(0 rows)

 -- verify shards and sequence are dropped
 \c - - - :worker_1_port
-\d transactional_drop_serial_1410006;
+\dt transactional_drop_serial_*
+List of relations
+ Schema | Name | Type | Owner
+--------+------+------+-------
+(0 rows)

 \ds transactional_drop_serial_column2_seq
 List of relations
 Schema | Name | Type | Owner
@ -144,15 +144,13 @@ NOTICE: using one-phase commit for distributed DDL commands
 HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
 \c - - - :worker_1_port
 -- DDL commands
-\d mx_table
-Table "public.mx_table"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass;
 Column | Type | Modifiers
 --------+---------+----------------------------------------------------------
 col_1 | integer |
 col_2 | text |
 col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass)
-Indexes:
-    "mx_test_uniq_index" UNIQUE, btree (col_1)
+(3 rows)

 CREATE INDEX mx_test_index ON mx_table(col_2);
 ERROR: operation is not allowed on this node

@ -163,16 +161,15 @@ HINT: Connect to the coordinator and run it again.
 ALTER TABLE mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col_1) REFERENCES mx_table(col_1);
 ERROR: operation is not allowed on this node
 HINT: Connect to the coordinator and run it again.
-\d mx_table
-Table "public.mx_table"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass;
 Column | Type | Modifiers
 --------+---------+----------------------------------------------------------
 col_1 | integer |
 col_2 | text |
 col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass)
-Indexes:
-    "mx_test_uniq_index" UNIQUE, btree (col_1)
+(3 rows)

+\d mx_test_index
 -- master_modify_multiple_shards
 SELECT master_modify_multiple_shards('UPDATE mx_table SET col_2=''none''');
 ERROR: operation is not allowed on this node

@ -402,23 +399,23 @@ CREATE SEQUENCE some_sequence;
 DROP SEQUENCE some_sequence;
 -- Show that dropping the sequence of an MX table with cascade harms the table and shards
 BEGIN;
-\d mx_table
-Table "public.mx_table"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass;
 Column | Type | Modifiers
 --------+---------+----------------------------------------------------------
 col_1 | integer |
 col_2 | text |
 col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass)
+(3 rows)

 DROP SEQUENCE mx_table_col_3_seq CASCADE;
 NOTICE: drop cascades to default for table mx_table column col_3
-\d mx_table
-Table "public.mx_table"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass;
 Column | Type | Modifiers
 --------+---------+-----------
 col_1 | integer |
 col_2 | text |
 col_3 | bigint | not null
+(3 rows)

 ROLLBACK;
 -- Cleanup
@ -794,11 +794,12 @@ ORDER BY

 -- verify that shard is replicated to other worker
 \c - - - :worker_2_port
-\d upgrade_reference_table_transaction_commit_*
-Table "public.upgrade_reference_table_transaction_commit_1360014"
- Column | Type | Modifiers
----------+---------+-----------
- column1 | integer |
+\dt upgrade_reference_table_transaction_commit_*
+List of relations
+ Schema | Name | Type | Owner
+--------+----------------------------------------------------+-------+----------
+ public | upgrade_reference_table_transaction_commit_1360014 | table | postgres
+(1 row)

 \c - - - :master_port
 DROP TABLE upgrade_reference_table_transaction_commit;
@ -47,7 +47,7 @@ FROM
 ORDER BY attnum;
 \c - - - :master_port

-\d lineitem_alter
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
 SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;

@ -65,7 +65,7 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
 -- Verify that SET NOT NULL works

 ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
-\d lineitem_alter
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;

 -- Drop default so that NULLs will be inserted for this column
 ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;

@ -77,7 +77,7 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;
 -- Verify that DROP NOT NULL works

 ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
-\d lineitem_alter
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;

 -- \copy should succeed now
 \copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'

@ -88,7 +88,7 @@ SELECT count(*) from lineitem_alter;
 SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2;

 ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT;
-\d lineitem_alter
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;

 SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2;

@ -116,19 +116,19 @@ ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS int_column2;
 ALTER TABLE IF EXISTS lineitem_alter RENAME COLUMN l_orderkey_renamed TO l_orderkey;
 SELECT SUM(l_orderkey) FROM lineitem_alter;

-\d lineitem_alter
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;

 -- Verify that we can execute commands with multiple subcommands

 ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER,
 ADD COLUMN int_column2 INTEGER;
-\d lineitem_alter
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;

 ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER,
 ALTER COLUMN int_column1 SET STATISTICS 10;

 ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2;
-\d lineitem_alter
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;

 -- Verify that we cannot execute alter commands on the distribution column

@ -161,7 +161,7 @@ ALTER TABLE IF EXISTS non_existent_table RENAME COLUMN column1 TO column2;

 -- Verify that none of the failed alter table commands took effect on the master
 -- node
-\d lineitem_alter
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;

 -- verify that non-propagated ddl commands are allowed inside a transaction block
 SET citus.enable_ddl_propagation to false;

@ -185,7 +185,8 @@ CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
 ALTER TABLE lineitem_alter ADD COLUMN first integer;
 COMMIT;

-\d lineitem_alter
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+\d temp_index_2

 ALTER TABLE lineitem_alter DROP COLUMN first;
 DROP INDEX temp_index_2;
@ -41,14 +41,6 @@ CREATE INDEX is_index4 ON events_table(event_type);
 CREATE INDEX is_index5 ON users_table(value_2);
 CREATE INDEX is_index6 ON events_table(value_2);

--- create a helper function to create types/functions on each node
-CREATE FUNCTION run_command_on_master_and_workers(p_sql text)
-RETURNS void LANGUAGE plpgsql AS $$
-BEGIN
-     EXECUTE p_sql;
-     PERFORM run_command_on_workers(p_sql);
-END;$$;
-
 -- Create composite type to use in subquery pushdown
 SELECT run_command_on_master_and_workers($f$
@ -12,6 +12,7 @@
 # ---
 test: multi_extension
 test: multi_cluster_management
+test: multi_test_helpers
 test: multi_table_ddl

 # ----------

@ -15,6 +15,7 @@
 # ---
 test: multi_extension
 test: multi_cluster_management
+test: multi_test_helpers

 test: multi_mx_create_table
 test: multi_mx_copy_data multi_mx_router_planner

@ -17,6 +17,7 @@
 # ---
 test: multi_extension
 test: multi_cluster_management
+test: multi_test_helpers
 test: multi_table_ddl
 test: multi_name_lengths
 test: multi_metadata_access

@ -15,6 +15,7 @@
 # ---
 test: multi_extension
 test: multi_cluster_management
+test: multi_test_helpers
 test: multi_table_ddl

 # ----------
@ -77,8 +77,7 @@ ORDER BY attnum;
 (27 rows)

 \c - - - :master_port
-\d lineitem_alter
-Table "public.lineitem_alter"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 Column | Type | Modifiers
 -----------------+-----------------------+-----------
 l_orderkey | bigint | not null

@ -102,6 +101,7 @@ ORDER BY attnum;
 int_column1 | integer | default 1
 int_column2 | integer | default 2
 null_column | integer |
+(21 rows)

 SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
 float_column | count

@ -138,8 +138,7 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;

 -- Verify that SET NOT NULL works
 ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
-\d lineitem_alter
-Table "public.lineitem_alter"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 Column | Type | Modifiers
 -----------------+-----------------------+--------------------
 l_orderkey | bigint | not null

@ -163,6 +162,7 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
 int_column1 | integer |
 int_column2 | integer | not null default 2
 null_column | integer |
+(21 rows)

 -- Drop default so that NULLs will be inserted for this column
 ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;

@ -173,8 +173,7 @@ ERROR: null value in column "int_column2" violates not-null constraint
 DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
 -- Verify that DROP NOT NULL works
 ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
-\d lineitem_alter
-Table "public.lineitem_alter"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 Column | Type | Modifiers
 -----------------+-----------------------+-----------
 l_orderkey | bigint | not null

@ -198,6 +197,7 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
 int_column1 | integer |
 int_column2 | integer |
 null_column | integer |
+(21 rows)

 -- \copy should succeed now
 \copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'

@ -216,8 +216,7 @@ SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP B
 (2 rows)

 ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT;
-\d lineitem_alter
-Table "public.lineitem_alter"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 Column | Type | Modifiers
 -----------------+-----------------------+-----------
 l_orderkey | bigint | not null

@ -241,6 +240,7 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT;
 int_column1 | integer |
 int_column2 | double precision |
 null_column | integer |
+(21 rows)

 SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2;
 int_column2 | pg_typeof | count

@ -280,8 +280,7 @@ SELECT SUM(l_orderkey) FROM lineitem_alter;
 53620791
 (1 row)

-\d lineitem_alter
-Table "public.lineitem_alter"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 Column | Type | Modifiers
 -----------------+-----------------------+-----------
 l_orderkey | bigint | not null

@ -301,12 +300,12 @@ SELECT SUM(l_orderkey) FROM lineitem_alter;
 l_shipmode | character(10) | not null
 l_comment | character varying(44) | not null
 null_column | integer |
+(17 rows)

 -- Verify that we can execute commands with multiple subcommands
 ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER,
 ADD COLUMN int_column2 INTEGER;
-\d lineitem_alter
-Table "public.lineitem_alter"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 Column | Type | Modifiers
 -----------------+-----------------------+-----------
 l_orderkey | bigint | not null

@ -328,14 +327,14 @@ ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER,
 null_column | integer |
 int_column1 | integer |
 int_column2 | integer |
+(19 rows)

 ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER,
 ALTER COLUMN int_column1 SET STATISTICS 10;
 ERROR: alter table command is currently unsupported
 DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP CONSTRAINT and TYPE subcommands are supported.
 ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2;
-\d lineitem_alter
-Table "public.lineitem_alter"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 Column | Type | Modifiers
 -----------------+-----------------------+-----------
 l_orderkey | bigint | not null

@ -355,6 +354,7 @@ ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2;
 l_shipmode | character(10) | not null
 l_comment | character varying(44) | not null
 null_column | integer |
+(17 rows)

 -- Verify that we cannot execute alter commands on the distribution column
 ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey DROP NOT NULL;

@ -395,8 +395,7 @@ ALTER TABLE IF EXISTS non_existent_table RENAME COLUMN column1 TO column2;
 NOTICE: relation "non_existent_table" does not exist, skipping
 -- Verify that none of the failed alter table commands took effect on the master
 -- node
-\d lineitem_alter
-Table "public.lineitem_alter"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 Column | Type | Modifiers
 -----------------+-----------------------+-----------
 l_orderkey | bigint | not null

@ -416,6 +415,7 @@ NOTICE: relation "non_existent_table" does not exist, skipping
 l_shipmode | character(10) | not null
 l_comment | character varying(44) | not null
 null_column | integer |
+(17 rows)

 -- verify that non-propagated ddl commands are allowed inside a transaction block
 SET citus.enable_ddl_propagation to false;

@ -446,8 +446,7 @@ BEGIN;
 CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
 ALTER TABLE lineitem_alter ADD COLUMN first integer;
 COMMIT;
-\d lineitem_alter
-Table "public.lineitem_alter"
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 Column | Type | Modifiers
 -----------------+-----------------------+-----------
 l_orderkey | bigint | not null

@ -468,8 +467,14 @@ COMMIT;
 l_comment | character varying(44) | not null
 null_column | integer |
 first | integer |
-Indexes:
-    "temp_index_2" btree (l_orderkey)
+(18 rows)

+\d temp_index_2
+Index "public.temp_index_2"
+ Column | Type | Definition
+------------+--------+------------
+ l_orderkey | bigint | l_orderkey
+btree, for table "public.lineitem_alter"

 ALTER TABLE lineitem_alter DROP COLUMN first;
 DROP INDEX temp_index_2;
@ -62,13 +62,6 @@ CREATE INDEX is_index3 ON users_table(value_1);
 CREATE INDEX is_index4 ON events_table(event_type);
 CREATE INDEX is_index5 ON users_table(value_2);
 CREATE INDEX is_index6 ON events_table(value_2);
--- create a helper function to create types/functions on each node
-CREATE FUNCTION run_command_on_master_and_workers(p_sql text)
-RETURNS void LANGUAGE plpgsql AS $$
-BEGIN
-     EXECUTE p_sql;
-     PERFORM run_command_on_workers(p_sql);
-END;$$;
 -- Create composite type to use in subquery pushdown
 SELECT run_command_on_master_and_workers($f$
|
@ -439,7 +439,7 @@ my @arguments = (
|
||||||
'--user', $user
|
'--user', $user
|
||||||
);
|
);
|
||||||
|
|
||||||
if ($majorversion eq '9.5' || $majorversion eq '9.6')
|
if ($majorversion eq '9.5' || $majorversion eq '9.6' || $majorversion eq '10')
|
||||||
{
|
{
|
||||||
push(@arguments, '--bindir', "tmp_check/tmp-bin");
|
push(@arguments, '--bindir', "tmp_check/tmp-bin");
|
||||||
}
|
}
|
||||||
|
|
|
@ -398,11 +398,11 @@ INSERT INTO products VALUES(1,'product_1', 10, 8);
 ROLLBACK;

 -- There should be no constraint on master and worker(s)
-\d products
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass;

 \c - - - :worker_1_port

-\d products_1450199
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;

 \c - - - :master_port
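For reference, the shape of table_checks output; a sketch assuming a hypothetical table declared with a single CHECK constraint (the constraint name follows PostgreSQL's default naming):

CREATE TABLE products_example (product_no int, price numeric CHECK (price > 0));
SELECT "Constraint", "Definition" FROM table_checks WHERE relid = 'products_example'::regclass;
--           Constraint          |   Definition
-- ------------------------------+-----------------
--  products_example_price_check | CHECK price > 0
-- (the view strips the outer parentheses from check_clause, hence no parens)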
@ -415,11 +415,11 @@ ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no);
 ROLLBACK;

 -- There should be no constraint on master and worker(s)
-\d products
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass;

 \c - - - :worker_1_port

-\d products_1450199
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;

 \c - - - :master_port
 DROP TABLE products;
@ -293,8 +293,8 @@ SELECT create_distributed_table('table_bigint', 'id', colocate_with => 'table1_g

 -- check worker table schemas
 \c - - - :worker_1_port
-\d table3_groupE_1300050
-\d schema_collocation.table4_groupE_1300052
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table3_groupE_1300050'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='schema_collocation.table4_groupE_1300052'::regclass;

 \c - - - :master_port
@ -237,9 +237,8 @@ SELECT master_create_distributed_table('check_example', 'partition_col', 'hash')
 SELECT master_create_worker_shards('check_example', '2', '2');

 \c - - - :worker_1_port
-\d check_example*
-\c - - - :worker_2_port
-\d check_example*
+\d check_example_partition_col_key_365040
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365040'::regclass;
 \c - - - :master_port

 -- drop unnecessary tables
@ -260,7 +259,7 @@ CREATE TABLE raw_table_2 (user_id int REFERENCES raw_table_1(user_id), UNIQUE(us
 SELECT create_distributed_table('raw_table_2', 'user_id');

 -- see that the constraint exists
-\d raw_table_2
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass;

 -- should be prevented by the foreign key
 DROP TABLE raw_table_1;

@ -269,7 +268,7 @@ DROP TABLE raw_table_1;
 DROP TABLE raw_table_1 CASCADE;

 -- see that the constraint also dropped
-\d raw_table_2
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass;

 -- drop the table as well
 DROP TABLE raw_table_2;
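Given raw_table_2's declaration above, the first table_fkeys query would print roughly the following (a sketch; the constraint name is PostgreSQL's default for an inline REFERENCES clause):

        Constraint        |                      Definition
--------------------------+-------------------------------------------------------
 raw_table_2_user_id_fkey | FOREIGN KEY (user_id) REFERENCES raw_table_1(user_id)
(1 row)

and the second query, after DROP TABLE raw_table_1 CASCADE, would return zero rows.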
@ -0,0 +1,21 @@
--
-- MULTI_CREATE_TABLE_NEW_FEATURES
--

-- print major version to make version-specific tests clear
SHOW server_version \gset
SELECT substring(:'server_version', '\d+') AS major_version;

-- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10
-- is forbidden in distributed tables.

CREATE TABLE table_identity_col (
    id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
    payload text );

SELECT master_create_distributed_table('table_identity_col', 'id', 'append');

SELECT create_distributed_table('table_identity_col', 'id');
SELECT create_distributed_table('table_identity_col', 'text');

SELECT create_reference_table('table_identity_col');
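Since the identity-column DDL is rejected for distributed and reference tables, a sequence-backed default remains the way to get generated keys; a minimal sketch (hypothetical table, not part of the test above):

CREATE TABLE table_serial_col (
    id bigserial PRIMARY KEY,  -- expands to a sequence-backed DEFAULT rather than an identity column
    payload text );
SELECT create_distributed_table('table_serial_col', 'id');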
@ -426,6 +426,7 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
 SET parallel_setup_cost=0;
 SET parallel_tuple_cost=0;
-SET min_parallel_relation_size=0;
+SET min_parallel_table_scan_size=0;
 SET max_parallel_workers_per_gather=4;

 -- ensure local plans display correctly
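The swapped SET reflects a GUC rename: PostgreSQL 10 split min_parallel_relation_size into min_parallel_table_scan_size (plus a new min_parallel_index_scan_size). A version-by-version sketch:

-- PostgreSQL 9.6
SET min_parallel_relation_size = 0;
-- PostgreSQL 10
SET min_parallel_table_scan_size = 0;
SET min_parallel_index_scan_size = 0;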
@ -5,6 +5,8 @@

 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 650000;

+-- print whether we're running on 9.5 to make version-specific tests clear
+SELECT substring(version(), '\d+(?:\.\d+)?') = '9.5' AS is_95;
 -- Set configuration to print table join order and pruned shards
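Both version probes used in these tests are regex matches over a server-reported string; roughly, on hypothetical 9.5.7 and 10.1 servers:

-- version() returns 'PostgreSQL 9.5.7 on ...'; the substring is '9.5', so is_95 is true
SELECT substring(version(), '\d+(?:\.\d+)?') = '9.5' AS is_95;
-- server_version is '10.1'; '\d+' grabs the leading '10' as the major version
SHOW server_version \gset
SELECT substring(:'server_version', '\d+') AS major_version;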
@ -10,6 +10,9 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
 SET citus.enable_unique_job_ids TO off;

+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;

 BEGIN;
 SET client_min_messages TO DEBUG4;

@ -9,6 +9,10 @@

 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;

+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+
 BEGIN;
 SET client_min_messages TO DEBUG3;
@ -78,7 +78,9 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
|
||||||
SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
|
SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
|
||||||
SELECT * FROM pg_dist_shard ORDER BY shardid;
|
SELECT * FROM pg_dist_shard ORDER BY shardid;
|
||||||
SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
|
SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
|
||||||
\d mx_testing_schema.mx_test_table
|
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
|
||||||
|
\d mx_testing_schema.mx_test_table_col_1_key
|
||||||
|
\d mx_testing_schema.mx_index
|
||||||
|
|
||||||
-- Check that pg_dist_colocation is not synced
|
-- Check that pg_dist_colocation is not synced
|
||||||
SELECT * FROM pg_dist_colocation ORDER BY colocationid;
|
SELECT * FROM pg_dist_colocation ORDER BY colocationid;
|
||||||
|
@ -107,7 +109,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||||
|
|
||||||
-- Check that foreign key metadata exists on the worker
|
-- Check that foreign key metadata exists on the worker
|
||||||
\c - - - :worker_1_port
|
\c - - - :worker_1_port
|
||||||
\d mx_testing_schema_2.fk_test_2
|
SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass;
|
||||||
|
|
||||||
\c - - - :master_port
|
\c - - - :master_port
|
||||||
DROP TABLE mx_testing_schema_2.fk_test_2;
|
DROP TABLE mx_testing_schema_2.fk_test_2;
|
||||||
|
@@ -126,7 +128,9 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
 SELECT * FROM pg_dist_partition ORDER BY logicalrelid;
 SELECT * FROM pg_dist_shard ORDER BY shardid;
 SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
-\d mx_testing_schema.mx_test_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass;
+\d mx_testing_schema.mx_test_table_col_1_key
+\d mx_testing_schema.mx_index
 SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;
 
 -- Make sure that start_metadata_sync_to_node cannot be called inside a transaction
@@ -190,8 +194,13 @@ CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text);
 CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2);
 ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1);
 
-\d mx_test_schema_1.mx_table_1
-\d mx_test_schema_2.mx_table_2
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
+\d mx_test_schema_1.mx_table_1_col1_key
+\d mx_test_schema_1.mx_index_1
+
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass;
+\d mx_test_schema_2.mx_index_2
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass;
 
 SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1');
 SELECT create_distributed_table('mx_test_schema_2.mx_table_2', 'col1');
@@ -222,8 +231,7 @@ ORDER BY
 \c - - - :worker_1_port
 
 -- Check that tables are created
-\d mx_test_schema_1.mx_table_1
-\d mx_test_schema_2.mx_table_2
+\dt mx_test_schema_?.mx_table_?
 
 -- Check that table metadata are created
 SELECT
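A single \dt suffices here because psql describe commands accept shell-style patterns: ? matches exactly one character and * matches any run of characters, so one command covers both tables. For example:

    \dt mx_test_schema_?.mx_table_?
    -- lists mx_test_schema_1.mx_table_1 and mx_test_schema_2.mx_table_2
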
@@ -260,16 +268,17 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
 SET citus.multi_shard_commit_protocol TO '2pc';
 SET client_min_messages TO 'ERROR';
 CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1);
-CREATE UNIQUE INDEX mx_index_4 ON mx_test_schema_2.mx_table_2(col1);
+ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1);
 \c - - - :worker_1_port
-\d mx_test_schema_2.mx_table_2
+\d mx_test_schema_2.mx_index_3
+\d mx_test_schema_2.mx_table_2_col1_key
 
 -- Check that DROP INDEX statement is propagated
 \c - - - :master_port
 SET citus.multi_shard_commit_protocol TO '2pc';
 DROP INDEX mx_test_schema_2.mx_index_3;
 \c - - - :worker_1_port
-\d mx_test_schema_2.mx_table_2
+\d mx_test_schema_2.mx_index_3
 
 -- Check that ALTER TABLE statements are propagated
 \c - - - :master_port
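Switching from CREATE UNIQUE INDEX to ALTER TABLE ... ADD CONSTRAINT ... UNIQUE gives the test a predictably named object to describe: PostgreSQL backs a UNIQUE constraint with an index of the same name, so \d on the constraint's index verifies propagation without depending on the version-sensitive \d table output. A generic illustration (hypothetical table t, not from the tests):

    CREATE TABLE t (col1 int);
    ALTER TABLE t ADD CONSTRAINT t_col1_key UNIQUE (col1);
    \d t_col1_key   -- describes the index backing the constraint
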
@@ -285,7 +294,8 @@ FOREIGN KEY
 REFERENCES
 mx_test_schema_2.mx_table_2(col1);
 \c - - - :worker_1_port
-\d mx_test_schema_1.mx_table_1
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
 
 -- Check that foreign key constraint with NOT VALID works as well
 \c - - - :master_port
@@ -301,7 +311,7 @@ REFERENCES
 mx_test_schema_2.mx_table_2(col1)
 NOT VALID;
 \c - - - :worker_1_port
-\d mx_test_schema_1.mx_table_1
+SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
 
 -- Check that mark_tables_colocated call propagates the changes to the workers
 \c - - - :master_port
@@ -417,13 +427,13 @@ DROP TABLE mx_table_with_small_sequence;
 -- Create an MX table with (BIGSERIAL) sequences
 CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL);
 SELECT create_distributed_table('mx_table_with_sequence', 'a');
-\d mx_table_with_sequence
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
 \ds mx_table_with_sequence_b_seq
 \ds mx_table_with_sequence_c_seq
 
 -- Check that the sequences created on the metadata worker as well
 \c - - - :worker_1_port
-\d mx_table_with_sequence
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
 \ds mx_table_with_sequence_b_seq
 \ds mx_table_with_sequence_c_seq
 
@@ -437,7 +447,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
 
 \c - - - :worker_2_port
 SELECT groupid FROM pg_dist_local_group;
-\d mx_table_with_sequence
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
 \ds mx_table_with_sequence_b_seq
 \ds mx_table_with_sequence_c_seq
 SELECT nextval('mx_table_with_sequence_b_seq');
@@ -525,10 +535,10 @@ DROP USER mx_user;
 \c - - - :master_port
 CREATE TABLE mx_ref (col_1 int, col_2 text);
 SELECT create_reference_table('mx_ref');
-\d mx_ref
+\dt mx_ref
 
 \c - - - :worker_1_port
-\d mx_ref
+\dt mx_ref
 SELECT
 logicalrelid, partmethod, repmodel, shardid, placementid, nodename, nodeport
 FROM
@@ -546,10 +556,12 @@ SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_re
 \c - - - :master_port
 ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0;
 CREATE INDEX mx_ref_index ON mx_ref(col_1);
-\d mx_ref
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;
+\d mx_ref_index
 
 \c - - - :worker_1_port
-\d mx_ref
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;
+\d mx_ref_index
 
 -- Check that metada is cleaned successfully upon drop table
 \c - - - :master_port
@@ -151,7 +151,7 @@ INSERT INTO labs VALUES (6, 'Bell Labs');
 COMMIT;
 
 -- but the DDL should correctly roll back
-\d labs
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.labs'::regclass;
 SELECT * FROM labs WHERE id = 6;
 
 -- COPY can happen after single row INSERT
@@ -20,19 +20,22 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL;
 
 
 -- See that the changes are applied on coordinator, worker tables and shards
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
+\d ddl_test*_index
 
 \c - - - :worker_1_port
 
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
+\d ddl_test*_index
-\d mx_ddl_table_1220088
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass;
+\d ddl_test*_index_1220088
 
 \c - - - :worker_2_port
 
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
+\d ddl_test*_index
-\d mx_ddl_table_1220089
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass;
+\d ddl_test*_index_1220089
 
 INSERT INTO mx_ddl_table VALUES (37, 78, 2);
 INSERT INTO mx_ddl_table VALUES (38, 78);
@@ -68,19 +71,22 @@ ALTER TABLE mx_ddl_table DROP COLUMN version;
 
 
 -- See that the changes are applied on coordinator, worker tables and shards
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
+\di ddl_test*_index
 
 \c - - - :worker_1_port
 
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
+\di ddl_test*_index
-\d mx_ddl_table_1220088
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass;
+\di ddl_test*_index_1220088
 
 \c - - - :worker_2_port
 
-\d mx_ddl_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
+\di ddl_test*_index
-\d mx_ddl_table_1220089
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass;
+\di ddl_test*_index_1220089
 
 -- Show that DDL commands are done within a two-phase commit transaction
 \c - - - :master_port
@@ -27,7 +27,9 @@ SELECT count(*) FROM pg_dist_transaction;
 
 \c - - - :worker_1_port
 
-\d distributed_mx_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass;
+\d distributed_mx_table_pkey
+\d distributed_mx_table_value_idx
 
 SELECT repmodel FROM pg_dist_partition
 WHERE logicalrelid = 'distributed_mx_table'::regclass;
@@ -37,7 +39,9 @@ WHERE logicalrelid = 'distributed_mx_table'::regclass;
 
 \c - - - :worker_2_port
 
-\d distributed_mx_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass;
+\d distributed_mx_table_pkey
+\d distributed_mx_table_value_idx
 
 SELECT repmodel FROM pg_dist_partition
 WHERE logicalrelid = 'distributed_mx_table'::regclass;
@@ -14,7 +14,7 @@ SELECT master_create_distributed_table('too_long_1234567890123456789012345678901
 SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2');
 
 \c - - - :worker_1_port
-\d too_long_*
+\dt too_long_*
 \c - - - :master_port
 
 -- Verify that the UDF works and rejects bad arguments.
@@ -50,7 +50,7 @@ ALTER TABLE name_lengths ADD EXCLUDE (int_col_1234567890123456789012345678901234
 ALTER TABLE name_lengths ADD CHECK (date_col_12345678901234567890123456789012345678901234567890 > '2014-01-01'::date);
 
 \c - - - :worker_1_port
-\d name_lengths_*
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass;
 \c - - - :master_port
 
 -- Placeholders for unsupported add constraints with EXPLICIT names that are too long
@@ -59,7 +59,7 @@ ALTER TABLE name_lengths ADD CONSTRAINT nl_exclude_12345678901234567890123456789
 ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_12345678901234567890123456789012345678901234567890 CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '2014-01-01'::date);
 
 \c - - - :worker_1_port
-\d nl_*
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass;
 \c - - - :master_port
 
 -- Placeholders for RENAME operations
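table_checks completes the trio of compatibility views, exposing CHECK constraints by relation. A hypothetical sketch matching the columns selected above (the committed definition may differ):

    -- Sketch only; contype 'c' selects CHECK constraints.
    CREATE VIEW table_checks AS
    SELECT c.conrelid AS relid,
           c.conname AS "Constraint",
           pg_get_constraintdef(c.oid) AS "Definition"
    FROM pg_constraint c
    WHERE c.contype = 'c';
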
@@ -99,13 +99,16 @@ CREATE TABLE sneaky_name_lengths (
 col2 integer not null,
 CONSTRAINT checky_12345678901234567890123456789012345678901234567890 CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100)
 );
-\d sneaky_name_lengths*
+
+\di public.sneaky_name_lengths*
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths'::regclass;
 
 SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash');
 SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
 
 \c - - - :worker_1_port
-\d sneaky_name_lengths*
+\di public.sneaky*225006
+SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass;
 \c - - - :master_port
 
 DROP TABLE sneaky_name_lengths CASCADE;
@@ -121,7 +124,7 @@ SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash');
 SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
 
 \c - - - :worker_1_port
-\d sneaky_name_lengths*
+\di unique*225008
 \c - - - :master_port
 
 DROP TABLE sneaky_name_lengths CASCADE;
@@ -135,7 +138,7 @@ SELECT master_create_distributed_table('too_long_1234567890123456789012345678901
 SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2');
 
 \c - - - :worker_1_port
-\d too_long_*
+\dt *225000000000*
 \c - - - :master_port
 
 DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE;
@@ -148,7 +151,8 @@ SELECT master_create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043
 SELECT master_create_worker_shards(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', '2', '2');
 
 \c - - - :worker_1_port
-\d elephant_*
+\dt public.elephant_*
+\di public.elephant_*
 \c - - - :master_port
 
 -- Clean up.
@@ -8,6 +8,10 @@
 
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000;
 
+-- print major version to make version-specific tests clear
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+') AS major_version;
+
 SET client_min_messages TO DEBUG2;
 SET citus.explain_all_tasks TO on;
 -- to avoid differing explain output - executor doesn't matter,
@@ -859,15 +859,18 @@ ALTER TABLE reference_table_ddl ALTER COLUMN value_2 SET DEFAULT 25.0;
 ALTER TABLE reference_table_ddl ALTER COLUMN value_3 SET NOT NULL;
 
 -- see that Citus applied all DDLs to the table
-\d reference_table_ddl
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl'::regclass;
+\d reference_index_2
 
 -- also to the shard placements
 \c - - - :worker_1_port
-\d reference_table_ddl*
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl_1250019'::regclass;
+\d reference_index_2_1250019
 \c - - - :master_port
 DROP INDEX reference_index_2;
 \c - - - :worker_1_port
-\d reference_table_ddl*
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl_1250019'::regclass;
+\di reference_index_2*
 \c - - - :master_port
 
 -- as we expect, renaming and setting WITH OIDS does not work for reference tables
@@ -384,7 +384,7 @@ WHERE
 \c - - - :master_port
 
 -- verify table structure is changed
-\d remove_node_reference_table
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_node_reference_table'::regclass;
 
 -- re-add the node for next tests
 SELECT master_add_node('localhost', :worker_2_port);
@@ -420,18 +420,18 @@ SET search_path TO public;
 ALTER TABLE test_schema_support.nation_hash ADD COLUMN new_col INT;
 
 -- verify column is added
-\d test_schema_support.nation_hash;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
 \c - - - :master_port
 
 ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS non_existent_column;
 ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS new_col;
 
 -- verify column is dropped
-\d test_schema_support.nation_hash;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
 \c - - - :master_port
 
 --test with search_path is set
@@ -439,9 +439,9 @@ SET search_path TO test_schema_support;
 ALTER TABLE nation_hash ADD COLUMN new_col INT;
 
 -- verify column is added
-\d test_schema_support.nation_hash;
+SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
+SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
 \c - - - :master_port
 
 SET search_path TO test_schema_support;
@@ -449,9 +449,9 @@ ALTER TABLE nation_hash DROP COLUMN IF EXISTS non_existent_column;
 ALTER TABLE nation_hash DROP COLUMN IF EXISTS new_col;
 
 -- verify column is dropped
-\d test_schema_support.nation_hash;
+SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
+SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
 \c - - - :master_port
 
 
@@ -462,18 +462,18 @@ SET search_path TO public;
 CREATE INDEX index1 ON test_schema_support.nation_hash(n_name);
 
 --verify INDEX is created
-\d test_schema_support.nation_hash;
+\d test_schema_support.index1
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
+\d test_schema_support.index1_1190003
 \c - - - :master_port
 
 -- DROP index
 DROP INDEX test_schema_support.index1;
 
 --verify INDEX is dropped
-\d test_schema_support.nation_hash;
+\d test_schema_support.index1
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
+\d test_schema_support.index1_1190003
 \c - - - :master_port
 
 --test with search_path is set
@@ -483,9 +483,9 @@ SET search_path TO test_schema_support;
 CREATE INDEX index1 ON nation_hash(n_name);
 
 --verify INDEX is created
-\d test_schema_support.nation_hash;
+\d test_schema_support.index1
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
+\d test_schema_support.index1_1190003
 \c - - - :master_port
 
 -- DROP index
@@ -493,9 +493,9 @@ SET search_path TO test_schema_support;
 DROP INDEX index1;
 
 --verify INDEX is dropped
-\d test_schema_support.nation_hash;
+\d test_schema_support.index1
 \c - - - :worker_1_port
-\d test_schema_support.nation_hash_1190003;
+\d test_schema_support.index1_1190003
 \c - - - :master_port
 