mirror of https://github.com/citusdata/citus.git
Compare commits
21 Commits
4f574c1712
06bfe70cc9
a6b65e0de3
db945e0ca1
85f2703bec
73ca2b9382
f3f93ffb82
885c77d0ae
95c4aed73b
7973d43ba4
e7e36dddca
2b8db771ef
5474508c01
8f9b1a839f
15fc7821a8
962bdc67af
f7f1b6cc5e
128273393f
95ba5dd39c
80da0ed69a
64f9ba2746
CHANGELOG.md (+88 lines)
@@ -1,3 +1,91 @@
+### citus v9.3.2 (Jun 22, 2020) ###
+
+* Fixes a version bump issue in 9.3.1
+
+### citus v9.3.1 (Jun 17, 2020) ###
+
+* Adds support to partially push down tdigest aggregates
+
+* Fixes a crash that occurs when inserting implicitly coerced constants
+
+### citus v9.3.0 (May 6, 2020) ###
+
+* Adds `max_shared_pool_size` to control number of connections across sessions
+
+* Adds support for window functions on coordinator
+
+* Improves shard pruning logic to understand OR-conditions
+
+* Prevents using an extra connection for intermediate result multi-casts
+
+* Adds propagation of `ALTER ROLE .. SET` statements
+
+* Adds `update_distributed_table_colocation` UDF to update colocation of tables
+
+* Introduces a UDF to truncate local data after distributing a table
+
+* Adds support for creating temp schemas in parallel
+
+* Adds support for evaluation of `nextval` in the target list on coordinator
+
+* Adds support for local execution of `COPY/TRUNCATE/DROP/DDL` commands
+
+* Adds support for local execution of shard creation
+
+* Uses local execution in a transaction block
+
+* Adds support for querying distributed table sizes concurrently
+
+* Allows `master_copy_shard_placement` to replicate placements to new nodes
+
+* Allows table type to be used in target list
+
+* Avoids having multiple maintenance daemons active for a single database
+
+* Defers reference table replication to shard creation time
+
+* Enables joins between local tables and reference tables in transaction blocks
+
+* Ignores pruned target list entries in coordinator plan
+
+* Improves `SIGTERM` handling of maintenance daemon
+
+* Increases the default of `citus.node_connection_timeout` to 30 seconds
+
+* Fixes a bug that occurs when creating remote tasks in local execution
+
+* Fixes a bug that causes some DML queries containing aggregates to fail
+
+* Fixes a bug that could cause failures in queries with subqueries or CTEs
+
+* Fixes a bug that may cause some connection failures to throw errors
+
+* Fixes a bug which caused queries with SRFs and function evaluation to fail
+
+* Fixes a bug with generated columns when executing `COPY dist_table TO file`
+
+* Fixes a crash when using non-constant limit clauses
+
+* Fixes a failure when composite types are used in prepared statements
+
+* Fixes a possible segfault when dropping dist. table in a transaction block
+
+* Fixes a possible segfault when non-pushdownable aggs are solely used in HAVING
+
+* Fixes a segfault when executing queries using `GROUPING`
+
+* Fixes an error when using `LEFT JOIN with GROUP BY` on primary key
+
+* Fixes an issue with distributing tables having generated cols not at the end
+
+* Fixes automatic SSL permission issue when using "initdb --allow-group-access"
+
+* Fixes errors which could occur when subqueries are parameters to aggregates
+
+* Fixes possible issues by invalidating the plan cache in `master_update_node`
+
+* Fixes timing issues which could be caused by changing system clock
+
 ### citus v9.2.4 (March 30, 2020) ###
 
 * Fixes a release problem in 9.2.3
configure

@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 9.3devel.
+# Generated by GNU Autoconf 2.69 for Citus 9.3.2.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.

@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='Citus'
 PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='9.3devel'
-PACKAGE_STRING='Citus 9.3devel'
+PACKAGE_VERSION='9.3.2'
+PACKAGE_STRING='Citus 9.3.2'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''

@@ -664,6 +664,7 @@ infodir
 docdir
 oldincludedir
 includedir
+runstatedir
 localstatedir
 sharedstatedir
 sysconfdir

@@ -740,6 +741,7 @@ datadir='${datarootdir}'
 sysconfdir='${prefix}/etc'
 sharedstatedir='${prefix}/com'
 localstatedir='${prefix}/var'
+runstatedir='${localstatedir}/run'
 includedir='${prefix}/include'
 oldincludedir='/usr/include'
 docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'

@@ -992,6 +994,15 @@ do
   | -silent | --silent | --silen | --sile | --sil)
     silent=yes ;;

+  -runstatedir | --runstatedir | --runstatedi | --runstated \
+  | --runstate | --runstat | --runsta | --runst | --runs \
+  | --run | --ru | --r)
+    ac_prev=runstatedir ;;
+  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
+  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
+  | --run=* | --ru=* | --r=*)
+    runstatedir=$ac_optarg ;;
+
   -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
     ac_prev=sbindir ;;
   -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \

@@ -1129,7 +1140,7 @@ fi
 for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
 		datadir sysconfdir sharedstatedir localstatedir includedir \
 		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
-		libdir localedir mandir
+		libdir localedir mandir runstatedir
 do
   eval ac_val=\$$ac_var
   # Remove trailing slashes.

@@ -1242,7 +1253,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures Citus 9.3devel to adapt to many kinds of systems.
+\`configure' configures Citus 9.3.2 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 

@@ -1282,6 +1293,7 @@ Fine tuning of the installation directories:
   --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
   --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
   --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
   --libdir=DIR            object code libraries [EPREFIX/lib]
   --includedir=DIR        C header files [PREFIX/include]
   --oldincludedir=DIR     C header files for non-gcc [/usr/include]

@@ -1303,7 +1315,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Citus 9.3devel:";;
+     short | recursive ) echo "Configuration of Citus 9.3.2:";;
   esac
   cat <<\_ACEOF
 

@@ -1403,7 +1415,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-Citus configure 9.3devel
+Citus configure 9.3.2
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.

@@ -1886,7 +1898,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by Citus $as_me 9.3devel, which was
+It was created by Citus $as_me 9.3.2, which was
 generated by GNU Autoconf 2.69. Invocation command line was
 
   $ $0 $@

@@ -5055,7 +5067,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Citus $as_me 9.3devel, which was
+This file was extended by Citus $as_me 9.3.2, which was
 generated by GNU Autoconf 2.69. Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES

@@ -5117,7 +5129,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-Citus config.status 9.3devel
+Citus config.status 9.3.2
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
configure.in

@@ -5,7 +5,7 @@
 # everyone needing autoconf installed, the resulting files are checked
 # into the SCM.
 
-AC_INIT([Citus], [9.3devel])
+AC_INIT([Citus], [9.3.2])
 AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
 
 # we'll need sed and awk for some of the version commands
(file name not preserved in mirror)

@@ -116,19 +116,18 @@ CallFuncExprRemotely(CallStmt *callStmt, DistObjectCacheEntry *procedure,
 		ereport(DEBUG1, (errmsg("distribution argument value must be a constant")));
 		return false;
 	}
-	Const *partitionValue = (Const *) partitionValueNode;
 
-	Datum partitionValueDatum = partitionValue->constvalue;
+	Const *partitionValue = (Const *) partitionValueNode;
 	if (partitionValue->consttype != partitionColumn->vartype)
 	{
-		CopyCoercionData coercionData;
+		bool missingOk = false;
 
-		ConversionPathForTypes(partitionValue->consttype, partitionColumn->vartype,
-							   &coercionData);
-
-		partitionValueDatum = CoerceColumnValue(partitionValueDatum, &coercionData);
+		partitionValue =
+			TransformPartitionRestrictionValue(partitionColumn, partitionValue,
+											   missingOk);
 	}
 
+	Datum partitionValueDatum = partitionValue->constvalue;
 	ShardInterval *shardInterval = FindShardInterval(partitionValueDatum, distTable);
 	if (shardInterval == NULL)
 	{
(file name not preserved in mirror)

@@ -1345,7 +1345,7 @@ DoCopyFromLocalTableIntoShards(Relation distributedRelation,
 	char *qualifiedRelationName =
 		generate_qualified_relation_name(RelationGetRelid(distributedRelation));
 	ereport(NOTICE, (errmsg("copying the data has completed"),
-					 errdetail("The local data in the table is longer visible, "
+					 errdetail("The local data in the table is no longer visible, "
 							   "but is still on disk."),
 					 errhint("To remove the local data, run: SELECT "
 							 "truncate_local_data_after_distributing_table($$%s$$)",
(file name not preserved in mirror)

@@ -264,6 +264,7 @@ static void EndPlacementStateCopyCommand(CopyPlacementState *placementState,
 static void UnclaimCopyConnections(List *connectionStateList);
 static void ShutdownCopyConnectionState(CopyConnectionState *connectionState,
 										CitusCopyDestReceiver *copyDest);
+static SelectStmt * CitusCopySelect(CopyStmt *copyStatement);
 static void CitusCopyTo(CopyStmt *copyStatement, char *completionTag);
 static int64 ForwardCopyDataFromConnection(CopyOutState copyOutState,
 										   MultiConnection *connection);

@@ -2760,22 +2761,9 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
 	{
 		/*
 		 * COPY table TO PROGRAM / file is handled by wrapping the table
-		 * in a SELECT * FROM table and going through the result COPY logic.
+		 * in a SELECT and going through the resulting COPY logic.
 		 */
-		ColumnRef *allColumns = makeNode(ColumnRef);
-		SelectStmt *selectStmt = makeNode(SelectStmt);
-		ResTarget *selectTarget = makeNode(ResTarget);
-
-		allColumns->fields = list_make1(makeNode(A_Star));
-		allColumns->location = -1;
-
-		selectTarget->name = NULL;
-		selectTarget->indirection = NIL;
-		selectTarget->val = (Node *) allColumns;
-		selectTarget->location = -1;
-
-		selectStmt->targetList = list_make1(selectTarget);
-		selectStmt->fromClause = list_make1(copyObject(copyStatement->relation));
+		SelectStmt *selectStmt = CitusCopySelect(copyStatement);
 
 		/* replace original statement */
 		copyStatement = copyObject(copyStatement);

@@ -2837,6 +2825,53 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
 }
 
 
+/*
+ * CitusCopySelect generates a SelectStmt such that table may be replaced in
+ * "COPY table FROM" for an equivalent result.
+ */
+static SelectStmt *
+CitusCopySelect(CopyStmt *copyStatement)
+{
+	SelectStmt *selectStmt = makeNode(SelectStmt);
+	selectStmt->fromClause = list_make1(copyObject(copyStatement->relation));
+
+	Relation distributedRelation = heap_openrv(copyStatement->relation, AccessShareLock);
+	TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);
+	List *targetList = NIL;
+
+	for (int i = 0; i < tupleDescriptor->natts; i++)
+	{
+		Form_pg_attribute attr = &tupleDescriptor->attrs[i];
+
+		if (attr->attisdropped
+#if PG_VERSION_NUM >= PG_VERSION_12
+			|| attr->attgenerated
+#endif
+			)
+		{
+			continue;
+		}
+
+		ColumnRef *column = makeNode(ColumnRef);
+		column->fields = list_make1(makeString(pstrdup(attr->attname.data)));
+		column->location = -1;
+
+		ResTarget *selectTarget = makeNode(ResTarget);
+		selectTarget->name = NULL;
+		selectTarget->indirection = NIL;
+		selectTarget->val = (Node *) column;
+		selectTarget->location = -1;
+
+		targetList = lappend(targetList, selectTarget);
+	}
+
+	heap_close(distributedRelation, NoLock);
+
+	selectStmt->targetList = targetList;
+	return selectStmt;
+}
+
+
 /*
  * CitusCopyTo runs a COPY .. TO STDOUT command on each shard to do a full
  * table dump.

@@ -3061,6 +3096,10 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist)
 		{
 			if (TupleDescAttr(tupDesc, i)->attisdropped)
 				continue;
+#if PG_VERSION_NUM >= PG_VERSION_12
+			if (TupleDescAttr(tupDesc, i)->attgenerated)
+				continue;
+#endif
 			attnums = lappend_int(attnums, i + 1);
 		}
 	}

@@ -3085,6 +3124,14 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist)
 				continue;
 			if (namestrcmp(&(att->attname), name) == 0)
 			{
+#if PG_VERSION_NUM >= PG_VERSION_12
+				if (att->attgenerated)
+					ereport(ERROR,
+							(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+							 errmsg("column \"%s\" is a generated column",
+									name),
+							 errdetail("Generated columns cannot be used in COPY.")));
+#endif
 				attnum = att->attnum;
 				break;
 			}
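The two CopyGetAttnums hunks apply the same rule twice: a column is usable by COPY only if it is neither dropped nor generated. A minimal consolidated sketch of that rule as PostgreSQL 12+ server-side code (the helper name CopyableAttributeNumbers is invented for this sketch and is not part of Citus or PostgreSQL):

    #include "postgres.h"

    #include "access/tupdesc.h"
    #include "catalog/pg_attribute.h"
    #include "nodes/pg_list.h"

    /*
     * CopyableAttributeNumbers returns the attribute numbers a COPY column
     * list may reference: dropped columns and GENERATED ... STORED columns
     * are skipped, mirroring the CopyGetAttnums change above.
     */
    static List *
    CopyableAttributeNumbers(TupleDesc tupleDescriptor)
    {
        List *attnums = NIL;

        for (int i = 0; i < tupleDescriptor->natts; i++)
        {
            Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, i);

            /* attgenerated is '\0' for ordinary columns */
            if (attr->attisdropped || attr->attgenerated)
            {
                continue;
            }

            attnums = lappend_int(attnums, attr->attnum);
        }

        return attnums;
    }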
(file name not preserved in mirror)

@@ -41,7 +41,7 @@
 #include "utils/memutils.h"
 
 
-int NodeConnectionTimeout = 5000;
+int NodeConnectionTimeout = 30000;
 int MaxCachedConnectionsPerWorker = 1;
 
 HTAB *ConnectionHash = NULL;

@@ -193,7 +193,16 @@ GetNodeConnection(uint32 flags, const char *hostname, int32 port)
 MultiConnection *
 StartNodeConnection(uint32 flags, const char *hostname, int32 port)
 {
-	return StartNodeUserDatabaseConnection(flags, hostname, port, NULL, NULL);
+	MultiConnection *connection = StartNodeUserDatabaseConnection(flags, hostname, port,
+																   NULL, NULL);
+
+	/*
+	 * connection can only be NULL for optional connections, which we don't
+	 * support in this codepath.
+	 */
+	Assert((flags & OPTIONAL_CONNECTION) == 0);
+	Assert(connection != NULL);
+	return connection;
 }

@@ -209,6 +218,13 @@ GetNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
 	MultiConnection *connection = StartNodeUserDatabaseConnection(flags, hostname, port,
 																   user, database);
 
+	/*
+	 * connection can only be NULL for optional connections, which we don't
+	 * support in this codepath.
+	 */
+	Assert((flags & OPTIONAL_CONNECTION) == 0);
+	Assert(connection != NULL);
+
 	FinishConnectionEstablishment(connection);
 
 	return connection;

@@ -236,6 +252,13 @@ StartWorkerListConnections(List *workerNodeList, uint32 flags, const char *user,
 															nodeName, nodePort,
 															user, database);
 
+		/*
+		 * connection can only be NULL for optional connections, which we don't
+		 * support in this codepath.
+		 */
+		Assert((flags & OPTIONAL_CONNECTION) == 0);
+		Assert(connection != NULL);
+
 		connectionList = lappend(connectionList, connection);
 	}
(file name not preserved in mirror)

@@ -294,6 +294,13 @@ StartPlacementListConnection(uint32 flags, List *placementAccessList,
 	chosenConnection = StartNodeUserDatabaseConnection(flags, nodeName, nodePort,
 													   userName, NULL);
 
+	/*
+	 * chosenConnection can only be NULL for optional connections, which we
+	 * don't support in this codepath.
+	 */
+	Assert((flags & OPTIONAL_CONNECTION) == 0);
+	Assert(chosenConnection != NULL);
+
 	if ((flags & CONNECTION_PER_PLACEMENT) &&
 		ConnectionAccessedDifferentPlacement(chosenConnection, placement))
 	{

@@ -314,6 +321,13 @@ StartPlacementListConnection(uint32 flags, List *placementAccessList,
 														   nodeName, nodePort,
 														   userName, NULL);
 
+		/*
+		 * chosenConnection can only be NULL for optional connections,
+		 * which we don't support in this codepath.
+		 */
+		Assert((flags & OPTIONAL_CONNECTION) == 0);
+		Assert(chosenConnection != NULL);
+
 		Assert(!ConnectionAccessedDifferentPlacement(chosenConnection, placement));
 	}
 }
(file name not preserved in mirror)

@@ -208,7 +208,7 @@ typedef struct DistributedExecution
 	 * Flag to indicate that the set of connections we are interested
 	 * in has changed and waitEventSet needs to be rebuilt.
 	 */
-	bool connectionSetChanged;
+	bool rebuildWaitEventSet;
 
 	/*
 	 * Flag to indicate that the set of wait events we are interested

@@ -1064,7 +1064,7 @@ CreateDistributedExecution(RowModifyLevel modLevel, List *taskList,
 
 	execution->raiseInterrupts = true;
 
-	execution->connectionSetChanged = false;
+	execution->rebuildWaitEventSet = false;
 	execution->waitFlagsChanged = false;
 
 	execution->jobIdList = jobIdList;

@@ -2008,6 +2008,7 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
 	session->connection = connection;
 	session->workerPool = workerPool;
 	session->commandsSent = 0;
+
 	dlist_init(&session->pendingTaskQueue);
 	dlist_init(&session->readyTaskQueue);

@@ -2142,7 +2143,7 @@ RunDistributedExecution(DistributedExecution *execution)
 	int eventSetSize = GetEventSetSize(execution->sessionList);
 
 	/* always (re)build the wait event set the first time */
-	execution->connectionSetChanged = true;
+	execution->rebuildWaitEventSet = true;
 
 	while (execution->unfinishedTaskCount > 0 && !cancellationReceived)
 	{

@@ -2154,7 +2155,7 @@ RunDistributedExecution(DistributedExecution *execution)
 			ManageWorkerPool(workerPool);
 		}
 
-		if (execution->connectionSetChanged)
+		if (execution->rebuildWaitEventSet)
 		{
 			if (events != NULL)
 			{

@@ -2236,7 +2237,7 @@ RebuildWaitEventSet(DistributedExecution *execution)
 	}
 
 	execution->waitEventSet = BuildWaitEventSet(execution->sessionList);
-	execution->connectionSetChanged = false;
+	execution->rebuildWaitEventSet = false;
 	execution->waitFlagsChanged = false;
 
 	return GetEventSetSize(execution->sessionList);

@@ -2482,7 +2483,7 @@ ManageWorkerPool(WorkerPool *workerPool)
 	}
 
 	INSTR_TIME_SET_CURRENT(workerPool->lastConnectionOpenTime);
-	execution->connectionSetChanged = true;
+	execution->rebuildWaitEventSet = true;
 }

@@ -2751,7 +2752,15 @@ ConnectionStateMachine(WorkerSession *session)
 				break;
 			}
 
+			int beforePollSocket = PQsocket(connection->pgConn);
 			PostgresPollingStatusType pollMode = PQconnectPoll(connection->pgConn);
 
+			if (beforePollSocket != PQsocket(connection->pgConn))
+			{
+				/* rebuild the wait events if PQconnectPoll() changed the socket */
+				execution->rebuildWaitEventSet = true;
+			}
+
 			if (pollMode == PGRES_POLLING_FAILED)
 			{
 				connection->connectionState = MULTI_CONNECTION_FAILED;

@@ -2759,10 +2768,16 @@ ConnectionStateMachine(WorkerSession *session)
 			else if (pollMode == PGRES_POLLING_READING)
 			{
 				UpdateConnectionWaitFlags(session, WL_SOCKET_READABLE);
+
+				/* we should have a valid socket */
+				Assert(PQsocket(connection->pgConn) != -1);
 			}
 			else if (pollMode == PGRES_POLLING_WRITING)
 			{
 				UpdateConnectionWaitFlags(session, WL_SOCKET_WRITEABLE);
+
+				/* we should have a valid socket */
+				Assert(PQsocket(connection->pgConn) != -1);
 			}
 			else
 			{

@@ -2771,6 +2786,9 @@ ConnectionStateMachine(WorkerSession *session)
 										  WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE);
 
 				connection->connectionState = MULTI_CONNECTION_CONNECTED;
+
+				/* we should have a valid socket */
+				Assert(PQsocket(connection->pgConn) != -1);
 			}
 
 			break;

@@ -2855,7 +2873,7 @@ ConnectionStateMachine(WorkerSession *session)
 			ShutdownConnection(connection);
 
 			/* remove connection from wait event set */
-			execution->connectionSetChanged = true;
+			execution->rebuildWaitEventSet = true;
 
 			/*
 			 * Reset the transaction state machine since CloseConnection()
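The ConnectionStateMachine hunks above capture a subtle libpq contract: PQconnectPoll() may switch to a different socket mid-handshake (for example, when it moves on to another candidate address), so any wait-event set registered on the old descriptor becomes stale. A standalone sketch of that polling loop under those assumptions (plain libpq; the connection string is a placeholder, and a real caller would block on the socket instead of spinning):

    #include <stdio.h>
    #include <libpq-fe.h>

    int
    main(void)
    {
        /* placeholder connection string */
        PGconn *connection = PQconnectStart("host=localhost dbname=postgres");
        if (connection == NULL || PQstatus(connection) == CONNECTION_BAD)
        {
            return 1;
        }

        int watchedSocket = PQsocket(connection);
        PostgresPollingStatusType pollMode = PGRES_POLLING_WRITING;

        while (pollMode != PGRES_POLLING_OK && pollMode != PGRES_POLLING_FAILED)
        {
            /* a real caller waits for watchedSocket to become ready here */
            pollMode = PQconnectPoll(connection);

            if (PQsocket(connection) != watchedSocket)
            {
                /* same situation as in the diff: any wait event set built
                 * on the old descriptor must be rebuilt */
                watchedSocket = PQsocket(connection);
                fprintf(stderr, "socket changed, wait event set is stale\n");
            }
        }

        int result = (pollMode == PGRES_POLLING_OK) ? 0 : 1;
        PQfinish(connection);
        return result;
    }

Compile with `cc example.c -lpq`.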
(file name not preserved in mirror)

@@ -468,29 +468,19 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList,
 		/* either the local or the remote should be non-nil */
 		Assert(!(localTaskPlacementList == NIL && remoteTaskPlacementList == NIL));
 
-		if (list_length(task->taskPlacementList) == 1)
+		if (localTaskPlacementList == NIL)
 		{
-			/*
-			 * At this point, the task has a single placement (e.g,. anchor shard
-			 * is distributed table's shard). So, it is either added to local or
-			 * remote taskList.
-			 */
-			if (localTaskPlacementList == NIL)
-			{
-				*remoteTaskList = lappend(*remoteTaskList, task);
-			}
-			else
-			{
-				*localTaskList = lappend(*localTaskList, task);
-			}
+			*remoteTaskList = lappend(*remoteTaskList, task);
+		}
+		else if (remoteTaskPlacementList == NIL)
+		{
+			*localTaskList = lappend(*localTaskList, task);
 		}
 		else
 		{
 			/*
-			 * At this point, we're dealing with reference tables or intermediate
-			 * results where the task has placements on both local and remote
-			 * nodes. We always prefer to use local placement, and require remote
-			 * placements only for modifications.
+			 * At this point, we're dealing with a task that has placements on both
+			 * local and remote nodes.
 			 */
 			task->partiallyLocalOrRemote = true;

@@ -505,6 +495,8 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList,
 			}
 			else
 			{
+				/* since shard replication factor > 1, we should have at least 1 remote task */
+				Assert(remoteTaskPlacementList != NIL);
 				Task *remoteTask = copyObject(task);
 				remoteTask->taskPlacementList = remoteTaskPlacementList;
(file name not preserved in mirror)

@@ -150,6 +150,13 @@ MultiClientConnectStart(const char *nodeName, uint32 nodePort, const char *nodeD
 	MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags,
 																   nodeName, nodePort,
 																   userName, nodeDatabase);
+
+	/*
+	 * connection can only be NULL for optional connections, which we don't
+	 * support in this codepath.
+	 */
+	Assert((connectionFlags & OPTIONAL_CONNECTION) == 0);
+	Assert(connection != NULL);
 	ConnStatusType connStatusType = PQstatus(connection->pgConn);
 
 	/*
(file name not preserved in mirror)

@@ -2423,13 +2423,13 @@ CurrentUserName(void)
 
 
 /*
- * LookupTypeOid returns the Oid of the "pg_catalog.{typeNameString}" type, or
+ * LookupTypeOid returns the Oid of the "{schemaNameString}.{typeNameString}" type, or
  * InvalidOid if it does not exist.
  */
-static Oid
-LookupTypeOid(char *typeNameString)
+Oid
+LookupTypeOid(char *schemaNameString, char *typeNameString)
 {
-	Value *schemaName = makeString("pg_catalog");
+	Value *schemaName = makeString(schemaNameString);
 	Value *typeName = makeString(typeNameString);
 	List *qualifiedName = list_make2(schemaName, typeName);
 	TypeName *enumTypeName = makeTypeNameFromNameList(qualifiedName);

@@ -2461,7 +2461,7 @@ LookupTypeOid(char *typeNameString)
 static Oid
 LookupStringEnumValueId(char *enumName, char *valueName)
 {
-	Oid enumTypeId = LookupTypeOid(enumName);
+	Oid enumTypeId = LookupTypeOid("pg_catalog", enumName);
 
 	if (enumTypeId == InvalidOid)
 	{
(file name not preserved in mirror)

@@ -124,7 +124,6 @@ static PlannerRestrictionContext * CurrentPlannerRestrictionContext(void);
 static void PopPlannerRestrictionContext(void);
 static void ResetPlannerRestrictionContext(
 	PlannerRestrictionContext *plannerRestrictionContext);
-static bool HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams);
 static bool IsLocalReferenceTableJoin(Query *parse, List *rangeTableList);
 static bool QueryIsNotSimpleSelect(Node *node);
 static void UpdateReferenceTablesWithShard(List *rangeTableList);

@@ -2242,7 +2241,7 @@ ResetPlannerRestrictionContext(PlannerRestrictionContext *plannerRestrictionCont
 * has external parameters that are not contained in boundParams, false
 * otherwise.
 */
-static bool
+bool
 HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams)
 {
 	if (expression == NULL)
(file name not preserved in mirror)

@@ -314,12 +314,10 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
 
 	if (partitionValue->consttype != partitionColumn->vartype)
 	{
-		CopyCoercionData coercionData;
-
-		ConversionPathForTypes(partitionValue->consttype, partitionColumn->vartype,
-							   &coercionData);
-
-		partitionValueDatum = CoerceColumnValue(partitionValueDatum, &coercionData);
+		bool missingOk = false;
+		partitionValue =
+			TransformPartitionRestrictionValue(partitionColumn, partitionValue,
+											   missingOk);
 	}
 
 	shardInterval = FindShardInterval(partitionValueDatum, distTable);
(file name not preserved in mirror)

@@ -39,6 +39,7 @@
 #include "distributed/multi_logical_planner.h"
 #include "distributed/multi_physical_planner.h"
 #include "distributed/pg_dist_partition.h"
+#include "distributed/tdigest_extension.h"
 #include "distributed/worker_protocol.h"
 #include "distributed/version_compat.h"
 #include "nodes/makefuncs.h"

@@ -61,6 +62,8 @@
 #include "utils/rel.h"
 #include "utils/syscache.h"
 
+#define StartsWith(msg, prefix) \
+	(strncmp(msg, prefix, strlen(prefix)) == 0)
 
 /* Config variable managed via guc.c */
 int LimitClauseRowFetchCount = -1; /* number of rows to fetch from each task */
@@ -1951,6 +1954,131 @@ MasterAggregateExpression(Aggref *originalAggregate,
 
 		newMasterExpression = (Expr *) unionAggregate;
 	}
+	else if (aggregateType == AGGREGATE_TDIGEST_COMBINE ||
+			 aggregateType == AGGREGATE_TDIGEST_ADD_DOUBLE)
+	{
+		/* tdigest of column */
+		Oid tdigestType = TDigestExtensionTypeOid(); /* tdigest type */
+		Oid unionFunctionId = TDigestExtensionAggTDigest1();
+
+		int32 tdigestReturnTypeMod = exprTypmod((Node *) originalAggregate);
+		Oid tdigestTypeCollationId = exprCollation((Node *) originalAggregate);
+
+		/* create first argument for tdigest_percentile(tdigest, double) */
+		Var *tdigestColumn = makeVar(masterTableId, walkerContext->columnId, tdigestType,
+									 tdigestReturnTypeMod, tdigestTypeCollationId,
+									 columnLevelsUp);
+		TargetEntry *tdigestTargetEntry = makeTargetEntry((Expr *) tdigestColumn,
+														  argumentId,
+														  NULL, false);
+		walkerContext->columnId++;
+
+		/* construct the master tdigest(tdigest) expression */
+		Aggref *unionAggregate = makeNode(Aggref);
+		unionAggregate->aggfnoid = unionFunctionId;
+		unionAggregate->aggtype = originalAggregate->aggtype;
+		unionAggregate->args = list_make1(tdigestTargetEntry);
+		unionAggregate->aggkind = AGGKIND_NORMAL;
+		unionAggregate->aggfilter = NULL;
+		unionAggregate->aggtranstype = InvalidOid;
+		unionAggregate->aggargtypes = list_make1_oid(tdigestType);
+		unionAggregate->aggsplit = AGGSPLIT_SIMPLE;
+
+		newMasterExpression = (Expr *) unionAggregate;
+	}
+	else if (aggregateType == AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLE ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLEARRAY ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLE ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLEARRAY)
+	{
+		/* tdigest of column */
+		Oid tdigestType = TDigestExtensionTypeOid();
+		Oid unionFunctionId = InvalidOid;
+		if (aggregateType == AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLE)
+		{
+			unionFunctionId = TDigestExtensionAggTDigestPercentile2();
+		}
+		else if (aggregateType == AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLEARRAY)
+		{
+			unionFunctionId = TDigestExtensionAggTDigestPercentile2a();
+		}
+		else if (aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLE)
+		{
+			unionFunctionId = TDigestExtensionAggTDigestPercentileOf2();
+		}
+		else if (aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLEARRAY)
+		{
+			unionFunctionId = TDigestExtensionAggTDigestPercentileOf2a();
+		}
+		Assert(OidIsValid(unionFunctionId));
+
+		int32 tdigestReturnTypeMod = exprTypmod((Node *) originalAggregate);
+		Oid tdigestTypeCollationId = exprCollation((Node *) originalAggregate);
+
+		/* create first argument for tdigest_percentile(tdigest, double) */
+		Var *tdigestColumn = makeVar(masterTableId, walkerContext->columnId, tdigestType,
+									 tdigestReturnTypeMod, tdigestTypeCollationId,
+									 columnLevelsUp);
+		TargetEntry *tdigestTargetEntry = makeTargetEntry((Expr *) tdigestColumn,
+														  argumentId, NULL, false);
+		walkerContext->columnId++;
+
+		/* construct the master tdigest_percentile(tdigest, double) expression */
+		Aggref *unionAggregate = makeNode(Aggref);
+		unionAggregate->aggfnoid = unionFunctionId;
+		unionAggregate->aggtype = originalAggregate->aggtype;
+		unionAggregate->args = list_make2(
+			tdigestTargetEntry,
+			list_nth(originalAggregate->args, 2));
+		unionAggregate->aggkind = AGGKIND_NORMAL;
+		unionAggregate->aggfilter = NULL;
+		unionAggregate->aggtranstype = InvalidOid;
+		unionAggregate->aggargtypes = list_make2_oid(
+			tdigestType,
+			list_nth_oid(originalAggregate->aggargtypes, 2));
+		unionAggregate->aggsplit = AGGSPLIT_SIMPLE;
+
+		newMasterExpression = (Expr *) unionAggregate;
+	}
+	else if (aggregateType == AGGREGATE_TDIGEST_PERCENTILE_TDIGEST_DOUBLE ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_TDIGEST_DOUBLEARRAY ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_TDIGEST_DOUBLE ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_TDIGEST_DOUBLEARRAY)
+	{
+		/* tdigest of column */
+		Oid tdigestType = TDigestExtensionTypeOid();
+
+		/* These functions already will combine the tdigest arguments returned */
+		Oid unionFunctionId = originalAggregate->aggfnoid;
+
+		int32 tdigestReturnTypeMod = exprTypmod((Node *) originalAggregate);
+		Oid tdigestTypeCollationId = exprCollation((Node *) originalAggregate);
+
+		/* create first argument for tdigest_percentile(tdigest, double) */
+		Var *tdigestColumn = makeVar(masterTableId, walkerContext->columnId, tdigestType,
+									 tdigestReturnTypeMod, tdigestTypeCollationId,
+									 columnLevelsUp);
+		TargetEntry *tdigestTargetEntry = makeTargetEntry((Expr *) tdigestColumn,
+														  argumentId, NULL, false);
+		walkerContext->columnId++;
+
+		/* construct the master tdigest_percentile(tdigest, double) expression */
+		Aggref *unionAggregate = makeNode(Aggref);
+		unionAggregate->aggfnoid = unionFunctionId;
+		unionAggregate->aggtype = originalAggregate->aggtype;
+		unionAggregate->args = list_make2(
+			tdigestTargetEntry,
+			list_nth(originalAggregate->args, 1));
+		unionAggregate->aggkind = AGGKIND_NORMAL;
+		unionAggregate->aggfilter = NULL;
+		unionAggregate->aggtranstype = InvalidOid;
+		unionAggregate->aggargtypes = list_make2_oid(
+			tdigestType,
+			list_nth_oid(originalAggregate->aggargtypes, 1));
+		unionAggregate->aggsplit = AGGSPLIT_SIMPLE;
+
+		newMasterExpression = (Expr *) unionAggregate;
+	}
 	else if (aggregateType == AGGREGATE_CUSTOM_COMBINE)
 	{
 		HeapTuple aggTuple =

@@ -3079,6 +3207,71 @@ WorkerAggregateExpressionList(Aggref *originalAggregate,
 		workerAggregateList = lappend(workerAggregateList, sumAggregate);
 		workerAggregateList = lappend(workerAggregateList, countAggregate);
 	}
+	else if (aggregateType == AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLE ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLEARRAY ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLE ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLEARRAY)
+	{
+		/*
+		 * The original query has an aggregate in the form of either
+		 * - tdigest_percentile(column, compression, quantile)
+		 * - tdigest_percentile(column, compression, quantile[])
+		 * - tdigest_percentile_of(column, compression, value)
+		 * - tdigest_percentile_of(column, compression, value[])
+		 *
+		 * We are creating the worker part of this query by creating a
+		 * - tdigest(column, compression)
+		 *
+		 * One could see we are passing argument 0 and argument 1 from the original query
+		 * in here. This corresponds with the list_nth calls in the args and aggargstypes
+		 * list construction. The tdigest function and type are read from the catalog.
+		 */
+		Aggref *newWorkerAggregate = copyObject(originalAggregate);
+		newWorkerAggregate->aggfnoid = TDigestExtensionAggTDigest2();
+		newWorkerAggregate->aggtype = TDigestExtensionTypeOid();
+		newWorkerAggregate->args = list_make2(
+			list_nth(newWorkerAggregate->args, 0),
+			list_nth(newWorkerAggregate->args, 1));
+		newWorkerAggregate->aggkind = AGGKIND_NORMAL;
+		newWorkerAggregate->aggtranstype = InvalidOid;
+		newWorkerAggregate->aggargtypes = list_make2_oid(
+			list_nth_oid(newWorkerAggregate->aggargtypes, 0),
+			list_nth_oid(newWorkerAggregate->aggargtypes, 1));
+		newWorkerAggregate->aggsplit = AGGSPLIT_SIMPLE;
+
+		workerAggregateList = lappend(workerAggregateList, newWorkerAggregate);
+	}
+	else if (aggregateType == AGGREGATE_TDIGEST_PERCENTILE_TDIGEST_DOUBLE ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_TDIGEST_DOUBLEARRAY ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_TDIGEST_DOUBLE ||
+			 aggregateType == AGGREGATE_TDIGEST_PERCENTILE_OF_TDIGEST_DOUBLEARRAY)
+	{
+		/*
+		 * The original query has an aggregate in the form of either
+		 * - tdigest_percentile(tdigest, quantile)
+		 * - tdigest_percentile(tdigest, quantile[])
+		 * - tdigest_percentile_of(tdigest, value)
+		 * - tdigest_percentile_of(tdigest, value[])
+		 *
+		 * We are creating the worker part of this query by creating a
+		 * - tdigest(tdigest)
+		 *
+		 * One could see we are passing argument 0 from the original query in here. This
+		 * corresponds with the list_nth calls in the args and aggargstypes list
+		 * construction. The tdigest function and type are read from the catalog.
+		 */
+		Aggref *newWorkerAggregate = copyObject(originalAggregate);
+		newWorkerAggregate->aggfnoid = TDigestExtensionAggTDigest1();
+		newWorkerAggregate->aggtype = TDigestExtensionTypeOid();
+		newWorkerAggregate->args = list_make1(list_nth(newWorkerAggregate->args, 0));
+		newWorkerAggregate->aggkind = AGGKIND_NORMAL;
+		newWorkerAggregate->aggtranstype = InvalidOid;
+		newWorkerAggregate->aggargtypes = list_make1_oid(
+			list_nth_oid(newWorkerAggregate->aggargtypes, 0));
+		newWorkerAggregate->aggsplit = AGGSPLIT_SIMPLE;
+
+		workerAggregateList = lappend(workerAggregateList, newWorkerAggregate);
+	}
 	else if (aggregateType == AGGREGATE_CUSTOM_COMBINE)
 	{
 		HeapTuple aggTuple =

@@ -3178,6 +3371,66 @@ GetAggregateType(Aggref *aggregateExpression)
 		}
 	}
 
+	/*
+	 * All functions from github.com/tvondra/tdigest start with the "tdigest" prefix.
+	 * Since it requires lookups of function names in a schema, we would like to only
+	 * perform these checks if there is some chance it will actually result in a
+	 * positive hit.
+	 */
+	if (StartsWith(aggregateProcName, "tdigest"))
+	{
+		if (aggFunctionId == TDigestExtensionAggTDigest1())
+		{
+			return AGGREGATE_TDIGEST_COMBINE;
+		}
+
+		if (aggFunctionId == TDigestExtensionAggTDigest2())
+		{
+			return AGGREGATE_TDIGEST_ADD_DOUBLE;
+		}
+
+		if (aggFunctionId == TDigestExtensionAggTDigestPercentile3())
+		{
+			return AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLE;
+		}
+
+		if (aggFunctionId == TDigestExtensionAggTDigestPercentile3a())
+		{
+			return AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLEARRAY;
+		}
+
+		if (aggFunctionId == TDigestExtensionAggTDigestPercentile2())
+		{
+			return AGGREGATE_TDIGEST_PERCENTILE_TDIGEST_DOUBLE;
+		}
+
+		if (aggFunctionId == TDigestExtensionAggTDigestPercentile2a())
+		{
+			return AGGREGATE_TDIGEST_PERCENTILE_TDIGEST_DOUBLEARRAY;
+		}
+
+		if (aggFunctionId == TDigestExtensionAggTDigestPercentileOf3())
+		{
+			return AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLE;
+		}
+
+		if (aggFunctionId == TDigestExtensionAggTDigestPercentileOf3a())
+		{
+			return AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLEARRAY;
+		}
+
+		if (aggFunctionId == TDigestExtensionAggTDigestPercentileOf2())
+		{
+			return AGGREGATE_TDIGEST_PERCENTILE_OF_TDIGEST_DOUBLE;
+		}
+
+		if (aggFunctionId == TDigestExtensionAggTDigestPercentileOf2a())
+		{
+			return AGGREGATE_TDIGEST_PERCENTILE_OF_TDIGEST_DOUBLEARRAY;
+		}
+	}
+
 
 	if (AggregateEnabledCustom(aggregateExpression))
 	{
 		return AGGREGATE_CUSTOM_COMBINE;
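GetAggregateType gates all of the catalog lookups above behind the StartsWith macro introduced near the top of the same file, so aggregates whose names cannot belong to the tdigest extension never pay for a schema-qualified function lookup. A standalone demonstration of the macro (plain C; the test strings are invented):

    #include <stdio.h>
    #include <string.h>

    #define StartsWith(msg, prefix) \
        (strncmp(msg, prefix, strlen(prefix)) == 0)

    int
    main(void)
    {
        /* only names that can plausibly belong to the tdigest extension
         * would trigger the more expensive catalog lookups */
        printf("%d\n", StartsWith("tdigest_percentile", "tdigest")); /* prints 1 */
        printf("%d\n", StartsWith("avg", "tdigest"));                /* prints 0 */
        return 0;
    }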
(file name not preserved in mirror)

@@ -177,6 +177,20 @@ CitusCustomScanPathPlan(PlannerInfo *root,
 {
 	CitusCustomScanPath *citusPath = (CitusCustomScanPath *) best_path;
 
+	/*
+	 * Columns could have been pruned from the target list by the standard planner.
+	 * A situation in which this might happen is a CASE that is proven to be always
+	 * the same, causing the other column to become useless:
+	 *   CASE WHEN ... <> NULL
+	 *     THEN ...
+	 *     ELSE ...
+	 *   END
+	 * Since nothing is equal to NULL it will always end up in the else branch. The final
+	 * target list the planner needs from our node is passed in as tlist. By placing that
+	 * as the target list on our scan the internal rows will be projected to this one.
+	 */
+	citusPath->remoteScan->scan.plan.targetlist = tlist;
+
 	/* clauses might have been added by the planner, need to add them to our scan */
 	RestrictInfo *restrictInfo = NULL;
 	List **quals = &citusPath->remoteScan->scan.plan.qual;
(file name not preserved in mirror)

@@ -95,6 +95,24 @@ bool EnableUniqueJobIds = true;
 static List *OperatorCache = NIL;
 
 
+/* context passed down in AddAnyValueAggregates mutator */
+typedef struct AddAnyValueAggregatesContext
+{
+	/* SortGroupClauses corresponding to the GROUP BY clause */
+	List *groupClauseList;
+
+	/* TargetEntry's to which the GROUP BY clauses refer */
+	List *groupByTargetEntryList;
+
+	/*
+	 * haveNonVarGrouping is true if there are expressions in the
+	 * GROUP BY target entries. We use this as an optimisation to
+	 * skip expensive checks when possible.
+	 */
+	bool haveNonVarGrouping;
+} AddAnyValueAggregatesContext;
+
+
 /* Local functions forward declarations for job creation */
 static Job * BuildJobTree(MultiTreeRoot *multiTree);
 static MultiNode * LeftMostNode(MultiTreeRoot *multiTree);

@@ -105,6 +123,7 @@ static Query * BuildReduceQuery(MultiExtendedOp *extendedOpNode, List *dependent
 static List * BaseRangeTableList(MultiNode *multiNode);
 static List * QueryTargetList(MultiNode *multiNode);
 static List * TargetEntryList(List *expressionList);
+static Node * AddAnyValueAggregates(Node *node, AddAnyValueAggregatesContext *context);
 static List * QueryGroupClauseList(MultiNode *multiNode);
 static List * QuerySelectClauseList(MultiNode *multiNode);
 static List * QueryJoinClauseList(MultiNode *multiNode);

@@ -696,13 +715,11 @@ BuildJobQuery(MultiNode *multiNode, List *dependentJobList)
 	 */
 	if (groupClauseList != NIL && isRepartitionJoin)
 	{
-		targetList = (List *) expression_tree_mutator((Node *) targetList,
-													  AddAnyValueAggregates,
-													  groupClauseList);
+		targetList = (List *) WrapUngroupedVarsInAnyValueAggregate(
+			(Node *) targetList, groupClauseList, targetList, true);
 
-		havingQual = expression_tree_mutator((Node *) havingQual,
-											 AddAnyValueAggregates,
-											 groupClauseList);
+		havingQual = WrapUngroupedVarsInAnyValueAggregate(
+			(Node *) havingQual, groupClauseList, targetList, false);
 	}
 
 	/*

@@ -974,23 +991,116 @@ TargetEntryList(List *expressionList)
 
 
 /*
- * AddAnyValueAggregates wraps all vars that do not appear in the GROUP BY
- * clause or are inside an aggregate function in an any_value aggregate
- * function. This is needed for repartition joins because primary keys are not
- * present on intermediate tables.
+ * WrapUngroupedVarsInAnyValueAggregate finds Var nodes in the expression
+ * that do not refer to any GROUP BY column and wraps them in an any_value
+ * aggregate. These columns are allowed when the GROUP BY is on a primary
+ * key of a relation, but not if we wrap the relation in a subquery.
+ * However, since we still know the value is unique, any_value gives the
+ * right result.
  */
 Node *
-AddAnyValueAggregates(Node *node, void *context)
+WrapUngroupedVarsInAnyValueAggregate(Node *expression, List *groupClauseList,
+									 List *targetList, bool checkExpressionEquality)
+{
+	if (expression == NULL)
+	{
+		return NULL;
+	}
+
+	AddAnyValueAggregatesContext context;
+	context.groupClauseList = groupClauseList;
+	context.groupByTargetEntryList = GroupTargetEntryList(groupClauseList, targetList);
+	context.haveNonVarGrouping = false;
+
+	if (checkExpressionEquality)
+	{
+		/*
+		 * If the GROUP BY contains non-Var expressions, we need to do an expensive
+		 * subexpression equality check.
+		 */
+		TargetEntry *targetEntry = NULL;
+		foreach_ptr(targetEntry, context.groupByTargetEntryList)
+		{
+			if (!IsA(targetEntry->expr, Var))
+			{
+				context.haveNonVarGrouping = true;
+				break;
+			}
+		}
+	}
+
+	/* put the result in the same memory context */
+	MemoryContext nodeContext = GetMemoryChunkContext(expression);
+	MemoryContext oldContext = MemoryContextSwitchTo(nodeContext);
+
+	Node *result = expression_tree_mutator(expression, AddAnyValueAggregates,
+										   &context);
+
+	MemoryContextSwitchTo(oldContext);
+
+	return result;
+}
+
+
+/*
+ * AddAnyValueAggregates wraps all vars that do not appear in the GROUP BY
+ * clause or are inside an aggregate function in an any_value aggregate
+ * function. This is needed because postgres allows columns that are not
+ * in the GROUP BY to appear on the target list as long as the primary key
+ * of the table is in the GROUP BY, but we sometimes wrap the join tree
+ * in a subquery in which case the primary key information is lost.
+ *
+ * This function copies parts of the node tree, but may contain references
+ * to the original node tree.
+ *
+ * The implementation is derived from / inspired by
+ * check_ungrouped_columns_walker.
+ */
+static Node *
+AddAnyValueAggregates(Node *node, AddAnyValueAggregatesContext *context)
 {
-	List *groupClauseList = context;
 	if (node == NULL)
 	{
 		return node;
 	}
 
-	if (IsA(node, Var))
+	if (IsA(node, Aggref) || IsA(node, GroupingFunc))
+	{
+		/* any column is allowed to appear in an aggregate or grouping */
+		return node;
+	}
+	else if (IsA(node, Var))
 	{
 		Var *var = (Var *) node;
+
+		/*
+		 * Check whether this Var appears in the GROUP BY.
+		 */
+		TargetEntry *groupByTargetEntry = NULL;
+		foreach_ptr(groupByTargetEntry, context->groupByTargetEntryList)
+		{
+			if (!IsA(groupByTargetEntry->expr, Var))
+			{
+				continue;
+			}
+
+			Var *groupByVar = (Var *) groupByTargetEntry->expr;
+
+			/* we should only be doing this at the top level of the query */
+			Assert(groupByVar->varlevelsup == 0);
+
+			if (var->varno == groupByVar->varno &&
+				var->varattno == groupByVar->varattno)
+			{
+				/* this Var is in the GROUP BY, do not wrap it */
+				return node;
+			}
+		}
+
+		/*
+		 * We have found a Var that does not appear in the GROUP BY.
+		 * Wrap it in an any_value aggregate.
+		 */
 		Aggref *agg = makeNode(Aggref);
 		agg->aggfnoid = CitusAnyValueFunctionId();
 		agg->aggtype = var->vartype;

@@ -1002,31 +1112,24 @@ AddAnyValueAggregates(Node *node, void *context)
 		agg->aggcollid = exprCollation((Node *) var);
 		return (Node *) agg;
 	}
-	if (IsA(node, TargetEntry))
+	else if (context->haveNonVarGrouping)
 	{
-		TargetEntry *targetEntry = (TargetEntry *) node;
-
 		/*
-		 * Stop searching this part of the tree if the targetEntry is part of
-		 * the group by clause.
+		 * The GROUP BY contains at least one expression. Check whether the
+		 * current expression is equal to one of the GROUP BY expressions.
+		 * Otherwise, continue to descend into subexpressions.
 		 */
-		if (targetEntry->ressortgroupref != 0)
+		TargetEntry *groupByTargetEntry = NULL;
+		foreach_ptr(groupByTargetEntry, context->groupByTargetEntryList)
 		{
-			SortGroupClause *sortGroupClause = NULL;
-			foreach_ptr(sortGroupClause, groupClauseList)
+			if (equal(node, groupByTargetEntry->expr))
 			{
-				if (sortGroupClause->tleSortGroupRef == targetEntry->ressortgroupref)
-				{
-					return node;
-				}
+				/* do not descend into mutator, all Vars are safe */
+				return node;
 			}
 		}
 	}
-	if (IsA(node, Aggref) || IsA(node, GroupingFunc))
-	{
-		return node;
-	}
 
 	return expression_tree_mutator(node, AddAnyValueAggregates, context);
 }

@@ -2130,12 +2233,29 @@ BuildJobTreeTaskList(Job *jobTree, PlannerRestrictionContext *plannerRestriction
 	List *assignedSqlTaskList = AssignTaskList(sqlTaskList);
 	AssignDataFetchDependencies(assignedSqlTaskList);
 
-	/* now assign merge task's data fetch dependencies */
+	/* if the parameters have not been resolved, record it */
+	job->parametersInJobQueryResolved =
+		!HasUnresolvedExternParamsWalker((Node *) job->jobQuery, NULL);
+
+	/*
+	 * Make final adjustments for the assigned tasks.
+	 *
+	 * First, update SELECT tasks' parameters resolved field.
+	 *
+	 * Second, assign merge task's data fetch dependencies.
+	 */
 	foreach(assignedSqlTaskCell, assignedSqlTaskList)
 	{
 		Task *assignedSqlTask = (Task *) lfirst(assignedSqlTaskCell);
-		List *assignedMergeTaskList = FindDependentMergeTaskList(assignedSqlTask);
 
+		/* we don't support parameters in the physical planner */
+		if (assignedSqlTask->taskType == SELECT_TASK)
+		{
+			assignedSqlTask->parametersInQueryStringResolved =
+				job->parametersInJobQueryResolved;
+		}
+
+		List *assignedMergeTaskList = FindDependentMergeTaskList(assignedSqlTask);
 		AssignDataFetchDependencies(assignedMergeTaskList);
 	}
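The refactor above replaces the bare List * that AddAnyValueAggregates received with a typed AddAnyValueAggregatesContext, so later fields (such as the precomputed haveNonVarGrouping flag) ride along without changing the recursion's call sites. A toy, self-contained illustration of that pattern in plain C (all names are invented for the sketch; this is not Citus code):

    #include <stdio.h>

    typedef struct Node
    {
        int value;
        struct Node *left;
        struct Node *right;
    } Node;

    /* typed context threaded through the recursion, instead of a bare pointer */
    typedef struct WalkContext
    {
        int groupKey;     /* plays the role of the GROUP BY column */
        int wrappedCount; /* how many nodes the walk would wrap in any_value() */
    } WalkContext;

    static void
    WalkTree(Node *node, WalkContext *context)
    {
        if (node == NULL)
        {
            return;
        }

        if (node->value != context->groupKey)
        {
            /* stand-in for wrapping an ungrouped Var in an any_value aggregate */
            context->wrappedCount++;
        }

        WalkTree(node->left, context);
        WalkTree(node->right, context);
    }

    int
    main(void)
    {
        Node leftLeaf = { 1, NULL, NULL };
        Node rightLeaf = { 2, NULL, NULL };
        Node root = { 1, &leftLeaf, &rightLeaf };

        WalkContext context = { 1, 0 };
        WalkTree(&root, &context);

        printf("wrapped %d node(s)\n", context.wrappedCount); /* wrapped 1 node(s) */
        return 0;
    }

Adding a new per-walk fact later means adding a struct field, not changing every recursive call.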
(file name not preserved in mirror)

@@ -2317,6 +2317,20 @@ TargetShardIntervalForFastPathQuery(Query *query, bool *isMultiShardQuery,
 	if (inputDistributionKeyValue && !inputDistributionKeyValue->constisnull)
 	{
 		CitusTableCacheEntry *cache = GetCitusTableCacheEntry(relationId);
+		Var *distributionKey = cache->partitionColumn;
+
+		/*
+		 * We currently don't allow implicitly coerced values to be handled by the
+		 * fast-path planner. Still, let's be defensive for any future changes.
+		 */
+		if (inputDistributionKeyValue->consttype != distributionKey->vartype)
+		{
+			bool missingOk = false;
+			inputDistributionKeyValue =
+				TransformPartitionRestrictionValue(distributionKey,
+												   inputDistributionKeyValue, missingOk);
+		}
+
 		ShardInterval *cachedShardInterval =
 			FindShardInterval(inputDistributionKeyValue->constvalue, cache);
 		if (cachedShardInterval == NULL)

@@ -2638,9 +2652,20 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError)
 		if (partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod ==
 			DISTRIBUTE_BY_RANGE)
 		{
+			Var *distributionKey = cacheEntry->partitionColumn;
+
+			/* handle coercions; if this fails, throw an error */
+			if (partitionValueConst->consttype != distributionKey->vartype)
+			{
+				bool missingOk = false;
+				partitionValueConst =
+					TransformPartitionRestrictionValue(distributionKey,
+													   partitionValueConst,
+													   missingOk);
+			}
+
 			Datum partitionValue = partitionValueConst->constvalue;
 
 			cacheEntry = GetCitusTableCacheEntry(distributedTableId);
 			ShardInterval *shardInterval = FindShardInterval(partitionValue, cacheEntry);
 			if (shardInterval != NULL)
 			{
(file name not preserved in mirror)

@@ -1648,15 +1648,15 @@ SubqueryPushdownMultiNodeTree(Query *originalQuery)
 	 */
 	if (extendedOpNode->groupClauseList != NIL)
 	{
-		extendedOpNode->targetList =
-			(List *) expression_tree_mutator((Node *) extendedOpNode->targetList,
-											 AddAnyValueAggregates,
-											 extendedOpNode->groupClauseList);
+		extendedOpNode->targetList = (List *) WrapUngroupedVarsInAnyValueAggregate(
+			(Node *) extendedOpNode->targetList,
+			extendedOpNode->groupClauseList,
+			extendedOpNode->targetList, true);
 
-		extendedOpNode->havingQual =
-			expression_tree_mutator((Node *) extendedOpNode->havingQual,
-									AddAnyValueAggregates,
-									extendedOpNode->groupClauseList);
+		extendedOpNode->havingQual = WrapUngroupedVarsInAnyValueAggregate(
+			(Node *) extendedOpNode->havingQual,
+			extendedOpNode->groupClauseList,
+			extendedOpNode->targetList, false);
 	}
 
 	/*
@ -93,6 +93,7 @@
|
|||
#include "parser/parse_coerce.h"
|
||||
#include "utils/arrayaccess.h"
|
||||
#include "utils/catcache.h"
|
||||
#include "utils/fmgrprotos.h"
|
||||
#include "utils/lsyscache.h"
|
||||
#include "utils/memutils.h"
|
||||
#include "utils/ruleutils.h"
|
||||
|
@ -255,14 +256,14 @@ static void AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context,
|
|||
Const *constantClause);
|
||||
static bool VarConstOpExprClause(OpExpr *opClause, Var *partitionColumn,
|
||||
Var **varClause, Const **constantClause);
|
||||
static Const * TransformPartitionRestrictionValue(Var *partitionColumn,
|
||||
Const *restrictionValue);
|
||||
static void AddSAOPartitionKeyRestrictionToInstance(ClauseWalkerContext *context,
|
||||
ScalarArrayOpExpr *
|
||||
arrayOperatorExpression);
|
||||
static bool SAORestrictions(ScalarArrayOpExpr *arrayOperatorExpression,
|
||||
Var *partitionColumn,
|
||||
List **requestedRestrictions);
|
||||
static void ErrorTypesDontMatch(Oid firstType, Oid firstCollId, Oid secondType,
|
||||
Oid secondCollId);
|
||||
static bool IsValidHashRestriction(OpExpr *opClause);
|
||||
static void AddHashRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opClause,
|
||||
Var *varClause, Const *constantClause);
|
||||
|
@ -1111,7 +1112,7 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
|
|||
{
|
||||
/* we want our restriction value in terms of the type of the partition column */
|
||||
constantClause = TransformPartitionRestrictionValue(partitionColumn,
|
||||
constantClause);
|
||||
constantClause, true);
|
||||
if (constantClause == NULL)
|
||||
{
|
||||
/* couldn't coerce value, its invalid restriction */
|
||||
|
@ -1223,8 +1224,9 @@ AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opCla
|
|||
* It is conceivable that in some instances this may not be possible,
|
||||
* in those cases we will simply fail to prune partitions based on this clause.
|
||||
*/
|
||||
static Const *
|
||||
TransformPartitionRestrictionValue(Var *partitionColumn, Const *restrictionValue)
|
||||
Const *
|
||||
TransformPartitionRestrictionValue(Var *partitionColumn, Const *restrictionValue,
|
||||
bool missingOk)
|
||||
{
|
||||
Node *transformedValue = coerce_to_target_type(NULL, (Node *) restrictionValue,
|
||||
restrictionValue->consttype,
|
||||
|
@ -1236,6 +1238,13 @@ TransformPartitionRestrictionValue(Var *partitionColumn, Const *restrictionValue
|
|||
/* if NULL, no implicit coercion is possible between the types */
|
||||
if (transformedValue == NULL)
|
||||
{
|
||||
if (!missingOk)
|
||||
{
|
||||
ErrorTypesDontMatch(partitionColumn->vartype, partitionColumn->varcollid,
|
||||
restrictionValue->consttype,
|
||||
restrictionValue->constcollid);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -1248,6 +1257,13 @@ TransformPartitionRestrictionValue(Var *partitionColumn, Const *restrictionValue
|
|||
/* if still not a constant, no immutable coercion matched */
|
||||
if (!IsA(transformedValue, Const))
|
||||
{
|
||||
if (!missingOk)
|
||||
{
|
||||
ErrorTypesDontMatch(partitionColumn->vartype, partitionColumn->varcollid,
|
||||
restrictionValue->consttype,
|
||||
restrictionValue->constcollid);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@@ -1255,6 +1271,24 @@ TransformPartitionRestrictionValue(Var *partitionColumn, Const *restrictionValue
 }
 
 
+/*
+ * ErrorTypesDontMatch throws an error explicitly printing the type names.
+ */
+static void
+ErrorTypesDontMatch(Oid firstType, Oid firstCollId, Oid secondType, Oid secondCollId)
+{
+	Datum firstTypename =
+		DirectFunctionCall1Coll(regtypeout, firstCollId, ObjectIdGetDatum(firstType));
+
+	Datum secondTypename =
+		DirectFunctionCall1Coll(regtypeout, secondCollId, ObjectIdGetDatum(secondType));
+
+	ereport(ERROR, (errmsg("Cannot coerce %s to %s",
+						   DatumGetCString(secondTypename),
+						   DatumGetCString(firstTypename))));
+}
+
+
 /*
  * IsValidHashRestriction checks whether an operator clause is a valid restriction for hashed column.
  */
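Taken together, the missingOk flag lets shard pruning degrade gracefully: callers that merely try to prune pass true and silently skip the clause, while callers that must coerce pass false and surface the new "Cannot coerce" error. A minimal SQL sketch of the pruning side, assuming a hash-distributed table with a smallint distribution column (table and column names are assumptions, not part of this diff):

    -- Sketch only: names are illustrative.
    CREATE TABLE items (key smallint, value text);
    SELECT create_distributed_table('items', 'key');
    -- The int4 literal is implicitly coerced to smallint, so the clause can
    -- still prune shards; were no implicit coercion available, pruning would
    -- simply be skipped (missingOk = true) instead of erroring out.
    SELECT * FROM items WHERE key = 7;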
@@ -0,0 +1,248 @@
+/*-------------------------------------------------------------------------
+ *
+ * tdigest_extension.c
+ *    Helper functions to get access to tdigest specific data.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/genam.h"
+#include "access/htup_details.h"
+#include "catalog/pg_extension.h"
+#include "catalog/pg_type.h"
+#include "distributed/metadata_cache.h"
+#include "distributed/tdigest_extension.h"
+#include "parser/parse_func.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+
+
+static Oid LookupTDigestFunction(const char *functionName, int argcount, Oid *argtypes);
+
+/*
+ * TDigestExtensionSchema finds the schema the tdigest extension is installed in. The
+ * function will return InvalidOid if the extension is not installed.
+ */
+Oid
+TDigestExtensionSchema()
+{
+	ScanKeyData entry[1];
+	Form_pg_extension extensionForm = NULL;
+	Oid tdigestExtensionSchema = InvalidOid;
+
+	Relation relation = heap_open(ExtensionRelationId, AccessShareLock);
+
+	ScanKeyInit(&entry[0],
+				Anum_pg_extension_extname,
+				BTEqualStrategyNumber, F_NAMEEQ,
+				CStringGetDatum("tdigest"));
+
+	SysScanDesc scandesc = systable_beginscan(relation, ExtensionNameIndexId, true,
+											  NULL, 1, entry);
+
+	HeapTuple extensionTuple = systable_getnext(scandesc);
+
+	/*
+	 * We assume that there can be at most one matching tuple, if no tuple found the
+	 * extension is not installed. The value of InvalidOid will not be changed.
+	 */
+	if (HeapTupleIsValid(extensionTuple))
+	{
+		extensionForm = (Form_pg_extension) GETSTRUCT(extensionTuple);
+		tdigestExtensionSchema = extensionForm->extnamespace;
+		Assert(OidIsValid(tdigestExtensionSchema));
+	}
+
+	systable_endscan(scandesc);
+
+	heap_close(relation, AccessShareLock);
+
+	return tdigestExtensionSchema;
+}
+
+
+/*
+ * TDigestExtensionTypeOid performs a lookup for the Oid of the type representing the
+ * tdigest as installed by the tdigest extension returns InvalidOid if the type cannot be
+ * found.
+ */
+Oid
+TDigestExtensionTypeOid()
+{
+	Oid tdigestSchemaOid = TDigestExtensionSchema();
+	if (!OidIsValid(tdigestSchemaOid))
+	{
+		return InvalidOid;
+	}
+	char *namespaceName = get_namespace_name(tdigestSchemaOid);
+	return LookupTypeOid(namespaceName, "tdigest");
+}
+
+
+/*
+ * LookupTDigestFunction is a helper function specifically to lookup functions in the
+ * namespace/schema where the tdigest extension is installed. This makes the lookup of
+ * following aggregate functions easier and less repetitive.
+ */
+static Oid
+LookupTDigestFunction(const char *functionName, int argcount, Oid *argtypes)
+{
+	Oid tdigestSchemaOid = TDigestExtensionSchema();
+	if (!OidIsValid(tdigestSchemaOid))
+	{
+		return InvalidOid;
+	}
+
+	char *namespaceName = get_namespace_name(tdigestSchemaOid);
+	return LookupFuncName(
+		list_make2(makeString(namespaceName), makeString(pstrdup(functionName))),
+		argcount, argtypes, true);
+}
+
+
+/*
+ * TDigestExtensionAggTDigest1 performs a lookup for the Oid of the tdigest aggregate;
+ * tdigest(tdigest)
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigest1()
+{
+	return LookupTDigestFunction("tdigest", 1, (Oid[]) { TDigestExtensionTypeOid() });
+}
+
+
+/*
+ * TDigestExtensionAggTDigest2 performs a lookup for the Oid of the tdigest aggregate;
+ * tdigest(value double precision, compression int)
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigest2()
+{
+	return LookupTDigestFunction("tdigest", 2, (Oid[]) { FLOAT8OID, INT4OID });
+}
+
+
+/*
+ * TDigestExtensionAggTDigestPercentile2 performs a lookup for the Oid of the tdigest
+ * aggregate;
+ * tdigest_percentile(tdigest, double precision)
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigestPercentile2()
+{
+	return LookupTDigestFunction("tdigest_percentile", 2,
+								 (Oid[]) { TDigestExtensionTypeOid(), FLOAT8OID });
+}
+
+
+/*
+ * TDigestExtensionAggTDigestPercentile2a performs a lookup for the Oid of the tdigest
+ * aggregate;
+ * tdigest_percentile(tdigest, double precision[])
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigestPercentile2a(void)
+{
+	return LookupTDigestFunction("tdigest_percentile", 2,
+								 (Oid[]) { TDigestExtensionTypeOid(), FLOAT8ARRAYOID });
+}
+
+
+/*
+ * TDigestExtensionAggTDigestPercentile3 performs a lookup for the Oid of the tdigest
+ * aggregate;
+ * tdigest_percentile(double precision, int, double precision)
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigestPercentile3()
+{
+	return LookupTDigestFunction("tdigest_percentile", 3,
+								 (Oid[]) { FLOAT8OID, INT4OID, FLOAT8OID });
+}
+
+
+/*
+ * TDigestExtensionAggTDigestPercentile3a performs a lookup for the Oid of the tdigest
+ * aggregate;
+ * tdigest_percentile(double precision, int, double precision[])
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigestPercentile3a(void)
+{
+	return LookupTDigestFunction("tdigest_percentile", 3,
+								 (Oid[]) { FLOAT8OID, INT4OID, FLOAT8ARRAYOID });
+}
+
+
+/*
+ * TDigestExtensionAggTDigestPercentileOf2 performs a lookup for the Oid of the tdigest
+ * aggregate;
+ * tdigest_percentile_of(tdigest, double precision)
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigestPercentileOf2()
+{
+	return LookupTDigestFunction("tdigest_percentile_of", 2,
+								 (Oid[]) { TDigestExtensionTypeOid(), FLOAT8OID });
+}
+
+
+/*
+ * TDigestExtensionAggTDigestPercentileOf2a performs a lookup for the Oid of the tdigest
+ * aggregate;
+ * tdigest_percentile_of(tdigest, double precision[])
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigestPercentileOf2a(void)
+{
+	return LookupTDigestFunction("tdigest_percentile_of", 2,
+								 (Oid[]) { TDigestExtensionTypeOid(), FLOAT8ARRAYOID });
+}
+
+
+/*
+ * TDigestExtensionAggTDigestPercentileOf3 performs a lookup for the Oid of the tdigest
+ * aggregate;
+ * tdigest_percentile_of(double precision, int, double precision)
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigestPercentileOf3()
+{
+	return LookupTDigestFunction("tdigest_percentile_of", 3,
+								 (Oid[]) { FLOAT8OID, INT4OID, FLOAT8OID });
+}
+
+
+/*
+ * TDigestExtensionAggTDigestPercentileOf3a performs a lookup for the Oid of the tdigest
+ * aggregate;
+ * tdigest_percentile_of(double precision, int, double precision[])
+ *
+ * If the aggregate is not found InvalidOid is returned.
+ */
+Oid
+TDigestExtensionAggTDigestPercentileOf3a(void)
+{
+	return LookupTDigestFunction("tdigest_percentile_of", 3,
+								 (Oid[]) { FLOAT8OID, INT4OID, FLOAT8ARRAYOID });
+}
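These lookups are what lets the planner recognize the extension's aggregates and partially push them down. A usage sketch, assuming the tdigest extension is installed on all nodes and telemetry(latency double precision) is a distributed table (both assumptions, not part of this diff); the signatures mirror the lookups above:

    SELECT tdigest(latency, 100) FROM telemetry;                      -- tdigest(value, compression)
    SELECT tdigest_percentile(latency, 100, 0.99) FROM telemetry;     -- 99th percentile
    SELECT tdigest_percentile(latency, 100, ARRAY[0.5, 0.99]) FROM telemetry;
    SELECT tdigest_percentile_of(latency, 100, 250.0) FROM telemetry; -- rank of the value 250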
@@ -450,7 +450,7 @@ RegisterCitusConfigVariables(void)
 		gettext_noop("Sets the maximum duration to connect to worker nodes."),
 		NULL,
 		&NodeConnectionTimeout,
-		5 * MS_PER_SECOND, 10 * MS, MS_PER_HOUR,
+		30 * MS_PER_SECOND, 10 * MS, MS_PER_HOUR,
 		PGC_USERSET,
 		GUC_UNIT_MS | GUC_STANDARD,
 		NULL, NULL, NULL);
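In SQL terms the change only raises the boot value from 5 to 30 seconds; the setting stays user-settable (PGC_USERSET) and is measured in milliseconds (GUC_UNIT_MS). A sketch:

    SHOW citus.node_connection_timeout;        -- 30s with the new default
    SET citus.node_connection_timeout = 5000;  -- per-session override, in milliseconds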
@@ -409,6 +409,13 @@ OpenConnectionsToWorkersInParallel(TargetWorkerSet targetWorkerSet, const char *
 		MultiConnection *connection = StartNodeUserDatabaseConnection(connectionFlags,
 																	   nodeName, nodePort,
 																	   user, NULL);
+
+		/*
+		 * connection can only be NULL for optional connections, which we don't
+		 * support in this codepath.
+		 */
+		Assert((connectionFlags & OPTIONAL_CONNECTION) == 0);
+		Assert(connection != NULL);
 		connectionList = lappend(connectionList, connection);
 	}
 	return connectionList;
@@ -476,6 +483,12 @@ SendCommandToWorkersParamsInternal(TargetWorkerSet targetWorkerSet, const char *
 															   nodeName, nodePort,
 															   user, NULL);
+
+		/*
+		 * connection can only be NULL for optional connections, which we don't
+		 * support in this codepath.
+		 */
+		Assert((connectionFlags & OPTIONAL_CONNECTION) == 0);
+		Assert(connection != NULL);
 		MarkRemoteTransactionCritical(connection);
 
 		connectionList = lappend(connectionList, connection);
@@ -14,6 +14,7 @@
 
 #include "distributed/citus_nodefuncs.h"
 #include "distributed/multi_server_executor.h"
+#include "distributed/listutils.h"
 #include "utils/datum.h"


@@ -73,6 +74,18 @@ CitusSetTag(Node *node, int tag)
 	} \
 	while (0)
 
+#define COPY_STRING_LIST(fldname) \
+	do { \
+		char *curString = NULL; \
+		List *newList = NIL; \
+		foreach_ptr(curString, from->fldname) { \
+			char *newString = curString ? pstrdup(curString) : (char *) NULL; \
+			newList = lappend(newList, newString); \
+		} \
+		newnode->fldname = newList; \
+	} \
+	while (0)
+
 static void CopyTaskQuery(Task *newnode, Task *from);
 
 static void
@@ -271,13 +284,13 @@ CopyTaskQuery(Task *newnode, Task *from)
 
 		case TASK_QUERY_TEXT_PER_PLACEMENT:
 		{
-			COPY_NODE_FIELD(taskQuery.data.perPlacementQueryStrings);
+			COPY_STRING_LIST(taskQuery.data.perPlacementQueryStrings);
 			break;
 		}
 
 		case TASK_QUERY_TEXT_LIST:
 		{
-			COPY_NODE_FIELD(taskQuery.data.queryStringList);
+			COPY_STRING_LIST(taskQuery.data.queryStringList);
 			break;
 		}
 
@@ -112,6 +112,7 @@ static void MaintenanceDaemonShmemExit(int code, Datum arg);
 static void MaintenanceDaemonErrorContext(void *arg);
 static bool LockCitusExtension(void);
 static bool MetadataSyncTriggeredCheckAndReset(MaintenanceDaemonDBData *dbData);
+static void WarnMaintenanceDaemonNotStarted(void);
 
 
 /*
@@ -153,8 +154,10 @@ InitializeMaintenanceDaemonBackend(void)
 
 	if (dbData == NULL)
 	{
-		/* FIXME: better message, reference relevant guc in hint */
-		ereport(ERROR, (errmsg("ran out of database slots")));
+		WarnMaintenanceDaemonNotStarted();
+		LWLockRelease(&MaintenanceDaemonControl->lock);
+
+		return;
 	}
 
 	/* maintenance daemon can ignore itself */
@@ -169,8 +172,6 @@ InitializeMaintenanceDaemonBackend(void)
 		BackgroundWorker worker;
 		BackgroundWorkerHandle *handle = NULL;
 
-		dbData->userOid = extensionOwner;
-
 		memset(&worker, 0, sizeof(worker));
 
 		SafeSnprintf(worker.bgw_name, sizeof(worker.bgw_name),
@@ -200,11 +201,15 @@ InitializeMaintenanceDaemonBackend(void)
 
 		if (!RegisterDynamicBackgroundWorker(&worker, &handle))
 		{
-			ereport(ERROR, (errmsg("could not start maintenance background worker"),
-							errhint("Increasing max_worker_processes might help.")));
+			WarnMaintenanceDaemonNotStarted();
+			dbData->daemonStarted = false;
+			LWLockRelease(&MaintenanceDaemonControl->lock);
+
+			return;
 		}
 
 		dbData->daemonStarted = true;
+		dbData->userOid = extensionOwner;
 		dbData->workerPid = 0;
 		dbData->triggerMetadataSync = false;
 		LWLockRelease(&MaintenanceDaemonControl->lock);
@@ -235,6 +240,17 @@ InitializeMaintenanceDaemonBackend(void)
 }
 
 
+/*
+ * WarnMaintenanceDaemonNotStarted warns that maintenanced couldn't be started.
+ */
+static void
+WarnMaintenanceDaemonNotStarted(void)
+{
+	ereport(WARNING, (errmsg("could not start maintenance background worker"),
+					  errhint("Increasing max_worker_processes might help.")));
+}
+
+
 /*
  * CitusMaintenanceDaemonMain is the maintenance daemon's main routine, it'll
  * be started by the background worker infrastructure. If it errors out,
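The observable effect of demoting the error: a session in a database that cannot get a maintenance daemon slot or background worker now keeps working and only reports a warning. A sketch of the expected interaction (message text from the diff, scenario assumed):

    -- With worker slots exhausted, a new Citus session proceeds and reports:
    --   WARNING:  could not start maintenance background worker
    --   HINT:  Increasing max_worker_processes might help.
    SHOW max_worker_processes;  -- raising this is the suggested remedy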
@@ -70,7 +70,7 @@ typedef enum CitusNodeTag
 } CitusNodeTag;
 
 
-const char** CitusNodeTagNames;
+extern const char** CitusNodeTagNames;
 
 
 typedef struct CitusNode
@@ -196,6 +196,7 @@ extern void multi_join_restriction_hook(PlannerInfo *root,
 										RelOptInfo *innerrel,
 										JoinType jointype,
 										JoinPathExtraData *extra);
+extern bool HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams);
 extern bool IsModifyCommand(Query *query);
 extern bool IsModifyDistributedPlan(struct DistributedPlan *distributedPlan);
 extern void EnsurePartitionTableNotReplicated(Oid relationId);
@@ -194,6 +194,7 @@ extern Oid DistPlacementGroupidIndexId(void);
 extern Oid DistObjectPrimaryKeyIndexId(void);
 
 /* type oids */
+extern Oid LookupTypeOid(char *schemaNameSting, char *typeNameString);
 extern Oid CitusCopyFormatTypeId(void);
 
 /* function oids */
@@ -220,5 +221,4 @@ extern char * CitusExtensionOwnerName(void);
 extern char * CurrentUserName(void);
 extern const char * CurrentDatabaseName(void);
 
-
 #endif /* METADATA_CACHE_H */
@@ -80,9 +80,21 @@ typedef enum
 	AGGREGATE_TOPN_UNION_AGG = 19,
 	AGGREGATE_ANY_VALUE = 20,
 
+	/* support for github.com/tvondra/tdigest */
+	AGGREGATE_TDIGEST_COMBINE = 21,
+	AGGREGATE_TDIGEST_ADD_DOUBLE = 22,
+	AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLE = 23,
+	AGGREGATE_TDIGEST_PERCENTILE_ADD_DOUBLEARRAY = 24,
+	AGGREGATE_TDIGEST_PERCENTILE_TDIGEST_DOUBLE = 25,
+	AGGREGATE_TDIGEST_PERCENTILE_TDIGEST_DOUBLEARRAY = 26,
+	AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLE = 27,
+	AGGREGATE_TDIGEST_PERCENTILE_OF_ADD_DOUBLEARRAY = 28,
+	AGGREGATE_TDIGEST_PERCENTILE_OF_TDIGEST_DOUBLE = 29,
+	AGGREGATE_TDIGEST_PERCENTILE_OF_TDIGEST_DOUBLEARRAY = 30,
+
 	/* AGGREGATE_CUSTOM must come last */
-	AGGREGATE_CUSTOM_COMBINE = 21,
-	AGGREGATE_CUSTOM_ROW_GATHER = 22,
+	AGGREGATE_CUSTOM_COMBINE = 31,
+	AGGREGATE_CUSTOM_ROW_GATHER = 32,
 } AggregateType;
@@ -497,7 +497,10 @@ extern Task * CreateBasicTask(uint64 jobId, uint32 taskId, TaskType taskType,
 							  char *queryString);
 
 extern OpExpr * MakeOpExpression(Var *variable, int16 strategyNumber);
-extern Node * AddAnyValueAggregates(Node *node, void *context);
+extern Node * WrapUngroupedVarsInAnyValueAggregate(Node *expression,
+												   List *groupClauseList,
+												   List *targetList,
+												   bool checkExpressionEquality);
 
 /*
  * Function declarations for building, updating constraints and simple operator
@@ -20,5 +20,7 @@
 extern List * PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList,
 						  Const **partitionValueConst);
 extern bool ContainsFalseClause(List *whereClauseList);
-
+extern Const * TransformPartitionRestrictionValue(Var *partitionColumn,
+												  Const *restrictionValue,
+												  bool missingOk);
 #endif /* SHARD_PRUNING_H_ */
@@ -0,0 +1,27 @@
+/*-------------------------------------------------------------------------
+ *
+ * tdigest_extension.c
+ *    Helper functions to get access to tdigest specific data.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef CITUS_TDIGEST_EXTENSION_H
+#define CITUS_TDIGEST_EXTENSION_H
+
+/* tdigest related functions */
+extern Oid TDigestExtensionSchema(void);
+extern Oid TDigestExtensionTypeOid(void);
+extern Oid TDigestExtensionAggTDigest1(void);
+extern Oid TDigestExtensionAggTDigest2(void);
+extern Oid TDigestExtensionAggTDigestPercentile2(void);
+extern Oid TDigestExtensionAggTDigestPercentile2a(void);
+extern Oid TDigestExtensionAggTDigestPercentile3(void);
+extern Oid TDigestExtensionAggTDigestPercentile3a(void);
+extern Oid TDigestExtensionAggTDigestPercentileOf2(void);
+extern Oid TDigestExtensionAggTDigestPercentileOf2a(void);
+extern Oid TDigestExtensionAggTDigestPercentileOf3(void);
+extern Oid TDigestExtensionAggTDigestPercentileOf3a(void);
+
+#endif /* CITUS_TDIGEST_EXTENSION_H */
@@ -316,7 +316,7 @@ insert into stock VALUES
 SELECT create_distributed_table('stock','s_w_id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$ch_bench_having.stock$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -324,7 +324,7 @@ insert into stock VALUES
 SELECT create_distributed_table('stock','s_w_id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$ch_bench_having.stock$$)
  create_distributed_table
 ---------------------------------------------------------------------
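The corrected HINT is runnable as printed; acting on it for the table above looks like this:

    -- Drops the local copy that remains on disk after distribution.
    SELECT truncate_local_data_after_distributing_table($$ch_bench_having.stock$$);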
@@ -146,7 +146,7 @@ NOTICE: executing the copy locally for shard xxxxx
 NOTICE: Copying data from local table...
 NOTICE: executing the copy locally for shard xxxxx
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$coordinator_shouldhaveshards.dist_table$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -182,7 +182,7 @@ NOTICE: executing the copy locally for shard xxxxx
 NOTICE: Copying data from local table...
 NOTICE: executing the copy locally for shard xxxxx
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$coordinator_shouldhaveshards.dist_table$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -375,13 +375,48 @@ NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.co
    100 | 3 | 2 | 3 | 2 |     1
 (1 row)
 
+-- issue #3801
+SET citus.shard_replication_factor TO 2;
+CREATE TABLE dist_table(a int);
+ERROR: relation "dist_table" already exists
+SELECT create_distributed_table('dist_table', 'a');
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$coordinator_shouldhaveshards.dist_table$$)
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+BEGIN;
+-- this will use perPlacementQueryStrings, make sure it works correctly with
+-- copying task
+INSERT INTO dist_table SELECT a + 1 FROM dist_table;
+ROLLBACK;
+BEGIN;
+SET citus.shard_replication_factor TO 2;
+CREATE TABLE dist_table1(a int);
+-- this will use queryStringList, make sure it works correctly with
+-- copying task
+SELECT create_distributed_table('dist_table1', 'a');
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1503023, 'coordinator_shouldhaveshards', 'CREATE TABLE coordinator_shouldhaveshards.dist_table1 (a integer)');SELECT worker_apply_shard_ddl_command (1503023, 'coordinator_shouldhaveshards', 'ALTER TABLE coordinator_shouldhaveshards.dist_table1 OWNER TO postgres')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1503025, 'coordinator_shouldhaveshards', 'CREATE TABLE coordinator_shouldhaveshards.dist_table1 (a integer)');SELECT worker_apply_shard_ddl_command (1503025, 'coordinator_shouldhaveshards', 'ALTER TABLE coordinator_shouldhaveshards.dist_table1 OWNER TO postgres')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1503026, 'coordinator_shouldhaveshards', 'CREATE TABLE coordinator_shouldhaveshards.dist_table1 (a integer)');SELECT worker_apply_shard_ddl_command (1503026, 'coordinator_shouldhaveshards', 'ALTER TABLE coordinator_shouldhaveshards.dist_table1 OWNER TO postgres')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1503028, 'coordinator_shouldhaveshards', 'CREATE TABLE coordinator_shouldhaveshards.dist_table1 (a integer)');SELECT worker_apply_shard_ddl_command (1503028, 'coordinator_shouldhaveshards', 'ALTER TABLE coordinator_shouldhaveshards.dist_table1 OWNER TO postgres')
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+ROLLBACK;
 RESET citus.enable_cte_inlining;
 DELETE FROM test;
 DROP TABLE test;
+DROP TABLE dist_table;
 DROP SCHEMA coordinator_shouldhaveshards CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table ref
+NOTICE: drop cascades to 4 other objects
+DETAIL: drop cascades to table dist_table
+drop cascades to table ref
 drop cascades to table ref_1503016
 drop cascades to table local
 SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', false);
@@ -5,7 +5,7 @@ INSERT INTO tt1 VALUES(1,2),(2,3),(3,4);
 SELECT create_distributed_table('tt1','id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$cte_nested_modifications.tt1$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -17,7 +17,7 @@ INSERT INTO tt2 VALUES(3,3),(4,4),(5,5);
 SELECT create_distributed_table('tt2','id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$cte_nested_modifications.tt2$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -5,7 +5,7 @@ INSERT INTO tt1 VALUES(1,2),(2,3),(3,4);
 SELECT create_distributed_table('tt1','id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$cte_prepared_modify.tt1$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -17,7 +17,7 @@ INSERT INTO tt2 VALUES(3,3),(4,4),(5,5);
 SELECT create_distributed_table('tt2','id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$cte_prepared_modify.tt2$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -36,7 +36,7 @@ INSERT INTO test_propagate VALUES (1, 'aesop', U&'\00E4sop'), (2, U&'Vo\1E9Er',
 SELECT create_distributed_table('test_propagate', 'id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$collation_tests.test_propagate$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -14,7 +14,7 @@ INSERT INTO test VALUES
 SELECT create_reference_table('ref');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$expression_reference_join.ref$$)
  create_reference_table
 ---------------------------------------------------------------------
@@ -24,7 +24,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
 SELECT create_distributed_table('test', 'x');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$expression_reference_join.test$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -62,7 +62,7 @@ INSERT INTO r1 (id, name) VALUES
 SELECT create_reference_table('r1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$fail_connect.r1$$)
  create_reference_table
 ---------------------------------------------------------------------
@@ -225,7 +225,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()');
 SELECT create_distributed_table('test_table', 'id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$create_distributed_table_non_empty_failure.test_table$$)
 ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
@@ -260,7 +260,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").cancel(' || pg_b
 SELECT create_distributed_table('test_table', 'id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$create_distributed_table_non_empty_failure.test_table$$)
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
@@ -108,7 +108,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").kill()');
 SELECT create_reference_table('ref_table');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$failure_reference_table.ref_table$$)
 ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard_placement;
@@ -127,7 +127,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").cancel(' || pg_
 SELECT create_reference_table('ref_table');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$failure_reference_table.ref_table$$)
 ERROR: canceling statement due to user request
 SELECT count(*) FROM pg_dist_shard_placement;
@@ -219,7 +219,7 @@ BEGIN;
 SELECT create_reference_table('ref_table');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$failure_reference_table.ref_table$$)
  create_reference_table
 ---------------------------------------------------------------------
@@ -246,7 +246,7 @@ BEGIN;
 SELECT create_reference_table('ref_table');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$failure_reference_table.ref_table$$)
  create_reference_table
 ---------------------------------------------------------------------
@@ -427,7 +427,7 @@ CREATE TABLE t3 AS SELECT * FROM t2;
 SELECT create_distributed_table('t3', 'a');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_shard.t3$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -527,7 +527,7 @@ CREATE TABLE t3 AS SELECT * FROM t2;
 SELECT create_distributed_table('t3', 'a');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_shard.t3$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -1273,7 +1273,7 @@ BEGIN;
 SELECT create_reference_table('test_table_1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_fkey_to_ref_in_tx.test_table_1$$)
  create_reference_table
 ---------------------------------------------------------------------
@@ -1300,7 +1300,7 @@ BEGIN;
 SELECT create_reference_table('test_table_1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_fkey_to_ref_in_tx.test_table_1$$)
  create_reference_table
 ---------------------------------------------------------------------
@@ -747,7 +747,7 @@ INSERT INTO referencing_table VALUES (1,1), (2,2), (3,3);
 SELECT create_reference_table('referenced_table');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$fkey_reference_table.referenced_table$$)
  create_reference_table
 ---------------------------------------------------------------------
@@ -1309,7 +1309,7 @@ BEGIN;
 SELECT create_reference_table('test_table_1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$fkey_reference_table.test_table_1$$)
  create_reference_table
 ---------------------------------------------------------------------
@@ -491,7 +491,7 @@ NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1
 NOTICE: executing the copy locally for shard xxxxx
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$local_shard_copy.ref_table$$)
  create_reference_table
 ---------------------------------------------------------------------
@@ -1674,7 +1674,7 @@ RESET client_min_messages;
 RESET citus.log_local_commands;
 \c - - - :master_port
 SET citus.next_shard_id TO 1480000;
--- local execution with custom type
+-- test both local and remote execution with custom type
 SET citus.replication_model TO "streaming";
 SET citus.shard_replication_factor TO 1;
 CREATE TYPE invite_resp AS ENUM ('yes', 'no', 'maybe');
@@ -1690,6 +1690,273 @@ SELECT create_distributed_table('event_responses', 'event_id');
 
 (1 row)
 
+INSERT INTO event_responses VALUES (1, 1, 'yes'), (2, 2, 'yes'), (3, 3, 'no'), (4, 4, 'no');
+CREATE OR REPLACE FUNCTION regular_func(p invite_resp)
+RETURNS int AS $$
+DECLARE
+  q1Result INT;
+  q2Result INT;
+  q3Result INT;
+BEGIN
+SELECT count(*) INTO q1Result FROM event_responses WHERE response = $1;
+SELECT count(*) INTO q2Result FROM event_responses e1 LEFT JOIN event_responses e2 USING (event_id) WHERE e2.response = $1;
+SELECT count(*) INTO q3Result FROM (SELECT * FROM event_responses WHERE response = $1 LIMIT 5) as foo;
+RETURN q3Result+q2Result+q1Result;
+END;
+$$ LANGUAGE plpgsql;
+SELECT regular_func('yes');
+ regular_func
+---------------------------------------------------------------------
+            6
+(1 row)
+
+SELECT regular_func('yes');
+ regular_func
+---------------------------------------------------------------------
+            6
+(1 row)
+
+SELECT regular_func('yes');
+ regular_func
+---------------------------------------------------------------------
+            6
+(1 row)
+
+SELECT regular_func('yes');
+ regular_func
+---------------------------------------------------------------------
+            6
+(1 row)
+
+SELECT regular_func('yes');
+ regular_func
+---------------------------------------------------------------------
+            6
+(1 row)
+
+SELECT regular_func('yes');
+ regular_func
+---------------------------------------------------------------------
+            6
+(1 row)
+
+SELECT regular_func('yes');
+ regular_func
+---------------------------------------------------------------------
+            6
+(1 row)
+
+SELECT regular_func('yes');
+ regular_func
+---------------------------------------------------------------------
+            6
+(1 row)
+
+CREATE OR REPLACE PROCEDURE regular_procedure(p invite_resp)
+AS $$
+BEGIN
+PERFORM * FROM event_responses WHERE response = $1;
+PERFORM * FROM event_responses e1 LEFT JOIN event_responses e2 USING (event_id) WHERE e2.response = $1;
+PERFORM * FROM (SELECT * FROM event_responses WHERE response = $1 LIMIT 5) as foo;
+END;
+$$ LANGUAGE plpgsql;
+CALL regular_procedure('no');
+CALL regular_procedure('no');
+CALL regular_procedure('no');
+CALL regular_procedure('no');
+CALL regular_procedure('no');
+CALL regular_procedure('no');
+CALL regular_procedure('no');
+PREPARE multi_shard_no_dist_key(invite_resp) AS select * from event_responses where response = $1::invite_resp LIMIT 1;
+EXECUTE multi_shard_no_dist_key('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE multi_shard_no_dist_key('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE multi_shard_no_dist_key('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE multi_shard_no_dist_key('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE multi_shard_no_dist_key('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE multi_shard_no_dist_key('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE multi_shard_no_dist_key('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+PREPARE multi_shard_with_dist_key(int, invite_resp) AS select * from event_responses where event_id > $1 AND response = $2::invite_resp LIMIT 1;
+EXECUTE multi_shard_with_dist_key(1, 'yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        2 |       2 | yes
+(1 row)
+
+EXECUTE multi_shard_with_dist_key(1, 'yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        2 |       2 | yes
+(1 row)
+
+EXECUTE multi_shard_with_dist_key(1, 'yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        2 |       2 | yes
+(1 row)
+
+EXECUTE multi_shard_with_dist_key(1, 'yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        2 |       2 | yes
+(1 row)
+
+EXECUTE multi_shard_with_dist_key(1, 'yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        2 |       2 | yes
+(1 row)
+
+EXECUTE multi_shard_with_dist_key(1, 'yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        2 |       2 | yes
+(1 row)
+
+EXECUTE multi_shard_with_dist_key(1, 'yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        2 |       2 | yes
+(1 row)
+
+PREPARE query_pushdown_no_dist_key(invite_resp) AS select * from event_responses e1 LEFT JOIN event_responses e2 USING(event_id) where e1.response = $1::invite_resp LIMIT 1;
+EXECUTE query_pushdown_no_dist_key('yes');
+ event_id | user_id | response | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes      |       1 | yes
+(1 row)
+
+EXECUTE query_pushdown_no_dist_key('yes');
+ event_id | user_id | response | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes      |       1 | yes
+(1 row)
+
+EXECUTE query_pushdown_no_dist_key('yes');
+ event_id | user_id | response | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes      |       1 | yes
+(1 row)
+
+EXECUTE query_pushdown_no_dist_key('yes');
+ event_id | user_id | response | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes      |       1 | yes
+(1 row)
+
+EXECUTE query_pushdown_no_dist_key('yes');
+ event_id | user_id | response | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes      |       1 | yes
+(1 row)
+
+EXECUTE query_pushdown_no_dist_key('yes');
+ event_id | user_id | response | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes      |       1 | yes
+(1 row)
+
+EXECUTE query_pushdown_no_dist_key('yes');
+ event_id | user_id | response | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes      |       1 | yes
+(1 row)
+
+PREPARE insert_select_via_coord(invite_resp) AS INSERT INTO event_responses SELECT * FROM event_responses where response = $1::invite_resp LIMIT 1 ON CONFLICT (event_id, user_id) DO NOTHING ;
+EXECUTE insert_select_via_coord('yes');
+EXECUTE insert_select_via_coord('yes');
+EXECUTE insert_select_via_coord('yes');
+EXECUTE insert_select_via_coord('yes');
+EXECUTE insert_select_via_coord('yes');
+EXECUTE insert_select_via_coord('yes');
+EXECUTE insert_select_via_coord('yes');
+PREPARE insert_select_pushdown(invite_resp) AS INSERT INTO event_responses SELECT * FROM event_responses where response = $1::invite_resp ON CONFLICT (event_id, user_id) DO NOTHING;
+EXECUTE insert_select_pushdown('yes');
+EXECUTE insert_select_pushdown('yes');
+EXECUTE insert_select_pushdown('yes');
+EXECUTE insert_select_pushdown('yes');
+EXECUTE insert_select_pushdown('yes');
+EXECUTE insert_select_pushdown('yes');
+EXECUTE insert_select_pushdown('yes');
+PREPARE router_select_with_no_dist_key_filter(invite_resp) AS select * from event_responses where event_id = 1 AND response = $1::invite_resp LIMIT 1;
+EXECUTE router_select_with_no_dist_key_filter('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE router_select_with_no_dist_key_filter('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE router_select_with_no_dist_key_filter('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE router_select_with_no_dist_key_filter('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE router_select_with_no_dist_key_filter('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE router_select_with_no_dist_key_filter('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+EXECUTE router_select_with_no_dist_key_filter('yes');
+ event_id | user_id | response
+---------------------------------------------------------------------
+        1 |       1 | yes
+(1 row)
+
+-- rest of the tests assume the table is empty
+TRUNCATE event_responses;
 CREATE OR REPLACE PROCEDURE register_for_event(p_event_id int, p_user_id int, p_choice invite_resp)
 LANGUAGE plpgsql AS $fn$
 BEGIN
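Each prepared statement above is deliberately executed more than five times: PostgreSQL typically switches a prepared statement to a generic plan around the sixth execution, which is exactly when a parameter of a custom type like invite_resp can reach Citus unresolved. A condensed sketch of the pattern:

    PREPARE p(invite_resp) AS
      SELECT count(*) FROM event_responses WHERE response = $1;
    EXECUTE p('yes');  -- executions 1-5 use custom plans with the value inlined
    EXECUTE p('yes');
    EXECUTE p('yes');
    EXECUTE p('yes');
    EXECUTE p('yes');
    EXECUTE p('yes');  -- from about here on, a generic plan keeps $1 as a parameter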
@@ -139,7 +139,7 @@ WHERE lineitem_local_to_hash_part.l_orderkey=orders_local_to_hash_part.o_orderke
 SELECT create_distributed_table('lineitem_local_to_hash_part', 'l_orderkey');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$materialized_view.lineitem_local_to_hash_part$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -149,7 +149,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
 SELECT create_distributed_table('orders_local_to_hash_part', 'o_orderkey');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$materialized_view.orders_local_to_hash_part$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -561,7 +561,7 @@ SET search_path TO 'sc3';
 SELECT create_distributed_table('alter_add_prim_key', 'x');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$sc3.alter_add_prim_key$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -202,7 +202,7 @@ HINT: Empty your table before distributing it.
 SELECT create_distributed_table('data_load_test', 'col1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.data_load_test$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -264,7 +264,7 @@ INSERT INTO data_load_test VALUES (132, 'hello');
 SELECT create_distributed_table('data_load_test', 'col1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.data_load_test$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -288,7 +288,7 @@ INSERT INTO data_load_test1 VALUES (132, 'hello');
 SELECT create_distributed_table('data_load_test1', 'col1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.data_load_test1$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -300,7 +300,7 @@ INSERT INTO data_load_test2 VALUES (132, 'world');
 SELECT create_distributed_table('data_load_test2', 'col1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.data_load_test2$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -238,7 +238,7 @@ INSERT INTO data_load_test VALUES (132, 'hello');
 SELECT create_distributed_table('data_load_test', 'col1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.data_load_test$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -255,7 +255,7 @@ INSERT INTO data_load_test VALUES (132, 'hello');
 SELECT create_distributed_table('data_load_test', 'col1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.data_load_test$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -271,7 +271,7 @@ INSERT INTO data_load_test VALUES (132, 'hello');
 SELECT create_distributed_table('data_load_test', 'col1');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.data_load_test$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -289,7 +289,7 @@ ALTER TABLE data_load_test DROP COLUMN col1;
 SELECT create_distributed_table('data_load_test', 'col3');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.data_load_test$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -384,7 +384,7 @@ INSERT INTO rollback_table VALUES(3, 'Name_3');
 SELECT create_distributed_table('rollback_table','id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.rollback_table$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -514,7 +514,7 @@ INSERT INTO tt1 VALUES(1);
 SELECT create_distributed_table('tt1','id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.tt1$$)
  create_distributed_table
 ---------------------------------------------------------------------
@@ -573,7 +573,7 @@ CREATE TABLE stage_table (LIKE sample_table);
 SELECT create_distributed_table('stage_table', 'id');
 NOTICE: Copying data from local table...
 NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
 HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.stage_table$$)
  create_distributed_table
 ---------------------------------------------------------------------
@ -647,7 +647,7 @@ insert into sc.ref SELECT s FROM generate_series(0, 100) s;
|
|||
SELECT create_reference_table('sc.ref');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is longer visible, but is still on disk.
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$sc.ref$$)
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
@ -659,7 +659,7 @@ insert into sc.hash SELECT s FROM generate_series(0, 100) s;
|
|||
SELECT create_distributed_table('sc.hash', 'a');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is longer visible, but is still on disk.
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$sc.hash$$)
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
@ -675,7 +675,7 @@ insert into sc2.hash SELECT s FROM generate_series(0, 100) s;
|
|||
SELECT create_distributed_table('sc2.hash', 'a');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is longer visible, but is still on disk.
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$sc2.hash$$)
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
@ -687,7 +687,7 @@ insert into sc2.ref SELECT s FROM generate_series(0, 100) s;
|
|||
SELECT create_reference_table('sc2.ref');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is longer visible, but is still on disk.
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$sc2.ref$$)
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
@ -731,7 +731,7 @@ ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX alter_replica_table
|
|||
SELECT create_distributed_table('alter_replica_table', 'id');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is longer visible, but is still on disk.
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$sc4.alter_replica_table$$)
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
@ -759,7 +759,7 @@ ALTER TABLE sc5.alter_replica_table REPLICA IDENTITY FULL;
|
|||
SELECT create_distributed_table('sc5.alter_replica_table', 'id');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is longer visible, but is still on disk.
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$sc5.alter_replica_table$$)
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
@ -787,7 +787,7 @@ ALTER TABLE sc6.alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
|
|||
SELECT create_distributed_table('sc6.alter_replica_table', 'id');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is longer visible, but is still on disk.
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$sc6.alter_replica_table$$)
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
@ -814,7 +814,7 @@ ALTER TABLE alter_replica_table REPLICA IDENTITY USING INDEX unique_idx;
|
|||
SELECT create_distributed_table('alter_replica_table', 'id');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is longer visible, but is still on disk.
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.alter_replica_table$$)
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
|
|
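-- Editor's note (not part of the diff): every hunk above corrects the same
-- NOTICE/DETAIL/HINT block. A minimal sketch of the flow it documents, using a
-- hypothetical throwaway table t:
CREATE TABLE t (a int);
INSERT INTO t VALUES (1);
SELECT create_distributed_table('t', 'a');  -- copies the local rows into shards
-- the local copy is then invisible but still on disk; drop it explicitly:
SELECT truncate_local_data_after_distributing_table($$public.t$$);
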
@ -134,7 +134,7 @@ ALTER EXTENSION citus UPDATE TO '9.3-2';
SHOW citus.version;
 citus.version
---------------------------------------------------------------------
- 9.3devel
+ 9.3.2
(1 row)

-- ensure no objects were created outside pg_catalog

@ -379,7 +379,7 @@ RESET ROLE;
SELECT create_distributed_table('my_table_with_data', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.my_table_with_data$$)
 create_distributed_table
---------------------------------------------------------------------

@ -409,7 +409,7 @@ SET ROLE read_access;
SELECT create_distributed_table('my_role_table_with_data', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.my_role_table_with_data$$)
 create_distributed_table
---------------------------------------------------------------------

@ -568,7 +568,7 @@ RESET ROLE;
SELECT create_distributed_table('full_access_user_schema.t1', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$full_access_user_schema.t1$$)
 create_distributed_table
---------------------------------------------------------------------

@ -27,11 +27,11 @@ INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03');
SELECT create_distributed_table('partitioning_test', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_test_2009$$)
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_test_2010$$)
 create_distributed_table
---------------------------------------------------------------------

@ -141,7 +141,7 @@ INSERT INTO partitioning_test_2012 VALUES (6, '2012-07-07');
ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2012 FOR VALUES FROM ('2012-01-01') TO ('2013-01-01');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_test_2012$$)
-- see from MX node, attached partition is distributed as well
\c - - - :worker_1_port

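-- Editor's note (not part of the diff): a minimal sketch of why the copy
-- notice repeats per partition above; table names here are hypothetical:
CREATE TABLE pt (id int, ts date) PARTITION BY RANGE (ts);
CREATE TABLE pt_2009 PARTITION OF pt FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
CREATE TABLE pt_2010 PARTITION OF pt FOR VALUES FROM ('2010-01-01') TO ('2011-01-01');
-- distributing the parent distributes every partition, emitting one
-- NOTICE/DETAIL/HINT block per partition that held local data:
SELECT create_distributed_table('pt', 'id');
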
@ -29,11 +29,11 @@ INSERT INTO partitioning_hash_test VALUES (4, 4);
SELECT create_distributed_table('partitioning_test', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_test_2009$$)
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_test_2010$$)
 create_distributed_table
---------------------------------------------------------------------

@ -43,11 +43,11 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
SELECT create_distributed_table('partitioning_hash_test', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_hash_test_0$$)
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_hash_test_1$$)
 create_distributed_table
---------------------------------------------------------------------

@ -170,7 +170,7 @@ INSERT INTO partitioning_test_2012 VALUES (6, '2012-07-07');
ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2012 FOR VALUES FROM ('2012-01-01') TO ('2013-01-01');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_test_2012$$)
-- attached partition is distributed as well
SELECT

@ -215,7 +215,7 @@ INSERT INTO partitioning_hash_test_2 VALUES (8, 5);
ALTER TABLE partitioning_hash_test ATTACH PARTITION partitioning_hash_test_2 FOR VALUES WITH (MODULUS 3, REMAINDER 2);
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_hash_test_2$$)
INSERT INTO partitioning_hash_test VALUES (9, 12);
-- see the data is loaded to shards

@ -711,7 +711,7 @@ INSERT INTO partitioning_test_reference SELECT a, a FROM generate_series(1, 50)
SELECT create_reference_table('partitioning_test_reference');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.partitioning_test_reference$$)
 create_reference_table
---------------------------------------------------------------------

@ -363,9 +363,189 @@ EXECUTE coerce_numeric_2(1);
 1 | test value
(1 row)

+-- Test that we can insert an integer literal into a numeric column as well
+CREATE TABLE numeric_test (id numeric(6, 1), val int);
+SELECT create_distributed_table('numeric_test', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO numeric_test VALUES (21, 87) RETURNING *;
+ id | val
+---------------------------------------------------------------------
+ 21.0 | 87
+(1 row)
+
+SELECT * FROM numeric_test WHERE id = 21;
+ id | val
+---------------------------------------------------------------------
+ 21.0 | 87
+(1 row)
+
+SELECT * FROM numeric_test WHERE id = 21::int;
+ id | val
+---------------------------------------------------------------------
+ 21.0 | 87
+(1 row)
+
+SELECT * FROM numeric_test WHERE id = 21::bigint;
+ id | val
+---------------------------------------------------------------------
+ 21.0 | 87
+(1 row)
+
+SELECT * FROM numeric_test WHERE id = 21.0;
+ id | val
+---------------------------------------------------------------------
+ 21.0 | 87
+(1 row)
+
+SELECT * FROM numeric_test WHERE id = 21.0::numeric;
+ id | val
+---------------------------------------------------------------------
+ 21.0 | 87
+(1 row)
+
+PREPARE insert_p(int) AS INSERT INTO numeric_test VALUES ($1, 87) RETURNING *;
+EXECUTE insert_p(1);
+ id | val
+---------------------------------------------------------------------
+ 1.0 | 87
+(1 row)
+
+EXECUTE insert_p(2);
+ id | val
+---------------------------------------------------------------------
+ 2.0 | 87
+(1 row)
+
+EXECUTE insert_p(3);
+ id | val
+---------------------------------------------------------------------
+ 3.0 | 87
+(1 row)
+
+EXECUTE insert_p(4);
+ id | val
+---------------------------------------------------------------------
+ 4.0 | 87
+(1 row)
+
+EXECUTE insert_p(5);
+ id | val
+---------------------------------------------------------------------
+ 5.0 | 87
+(1 row)
+
+EXECUTE insert_p(6);
+ id | val
+---------------------------------------------------------------------
+ 6.0 | 87
+(1 row)
+
+PREPARE select_p(int) AS SELECT * FROM numeric_test WHERE id=$1;
+EXECUTE select_p(1);
+ id | val
+---------------------------------------------------------------------
+ 1.0 | 87
+(1 row)
+
+EXECUTE select_p(2);
+ id | val
+---------------------------------------------------------------------
+ 2.0 | 87
+(1 row)
+
+EXECUTE select_p(3);
+ id | val
+---------------------------------------------------------------------
+ 3.0 | 87
+(1 row)
+
+EXECUTE select_p(4);
+ id | val
+---------------------------------------------------------------------
+ 4.0 | 87
+(1 row)
+
+EXECUTE select_p(5);
+ id | val
+---------------------------------------------------------------------
+ 5.0 | 87
+(1 row)
+
+EXECUTE select_p(6);
+ id | val
+---------------------------------------------------------------------
+ 6.0 | 87
+(1 row)
+
+SET citus.enable_fast_path_router_planner TO false;
+EXECUTE select_p(1);
+ id | val
+---------------------------------------------------------------------
+ 1.0 | 87
+(1 row)
+
+EXECUTE select_p(2);
+ id | val
+---------------------------------------------------------------------
+ 2.0 | 87
+(1 row)
+
+EXECUTE select_p(3);
+ id | val
+---------------------------------------------------------------------
+ 3.0 | 87
+(1 row)
+
+EXECUTE select_p(4);
+ id | val
+---------------------------------------------------------------------
+ 4.0 | 87
+(1 row)
+
+EXECUTE select_p(5);
+ id | val
+---------------------------------------------------------------------
+ 5.0 | 87
+(1 row)
+
+EXECUTE select_p(6);
+ id | val
+---------------------------------------------------------------------
+ 6.0 | 87
+(1 row)
+
+-- make sure that we don't return wrong results
+INSERT INTO numeric_test VALUES (21.1, 87) RETURNING *;
+ id | val
+---------------------------------------------------------------------
+ 21.1 | 87
+(1 row)
+
+SELECT * FROM numeric_test WHERE id = 21;
+ id | val
+---------------------------------------------------------------------
+ 21.0 | 87
+(1 row)
+
+SELECT * FROM numeric_test WHERE id = 21::numeric;
+ id | val
+---------------------------------------------------------------------
+ 21.0 | 87
+(1 row)
+
+SELECT * FROM numeric_test WHERE id = 21.1::numeric;
+ id | val
+---------------------------------------------------------------------
+ 21.1 | 87
+(1 row)
+
SET search_path TO public;
DROP SCHEMA prune_shard_list CASCADE;
-NOTICE: drop cascades to 9 other objects
+NOTICE: drop cascades to 10 other objects
DETAIL: drop cascades to function prune_shard_list.prune_using_no_values(regclass)
drop cascades to function prune_shard_list.prune_using_single_value(regclass,text)
drop cascades to function prune_shard_list.prune_using_either_value(regclass,text,text)

@ -375,3 +555,4 @@ drop cascades to function prune_shard_list.print_sorted_shard_intervals(regclass
drop cascades to table prune_shard_list.pruning
drop cascades to table prune_shard_list.pruning_range
drop cascades to table prune_shard_list.coerce_hash
+drop cascades to table prune_shard_list.numeric_test

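-- Editor's note (not part of the diff): a hedged reduction of what the new
-- tests pin down. The integer literal is implicitly coerced to the column
-- type numeric(6,1) before shard pruning, so both predicates should route to
-- the same single shard rather than crashing or scanning all shards:
EXPLAIN (COSTS OFF) SELECT * FROM numeric_test WHERE id = 21;
EXPLAIN (COSTS OFF) SELECT * FROM numeric_test WHERE id = 21.0::numeric;
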
@ -208,11 +208,11 @@ INSERT INTO partitioning_test VALUES (2, '2010-07-07');
SELECT create_distributed_table('partitioning_test', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_real_time_transaction.partitioning_test_2009$$)
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_real_time_transaction.partitioning_test_2010$$)
 create_distributed_table
---------------------------------------------------------------------

@ -6,7 +6,7 @@ INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');
SELECT create_reference_table('reference_table_test');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.reference_table_test$$)
 create_reference_table
---------------------------------------------------------------------

@ -1639,6 +1639,15 @@ ORDER BY 1;
   ->  Seq Scan on colocated_table_test_1250005 colocated_table_test
(25 rows)

+WITH a as (SELECT rt.value_2 FROM reference_table_test rt where rt.value_2 = 2)
+SELECT ct.value_1, count(*) FROM colocated_table_test ct join a on ct.value_1 = a.value_2
+WHERE exists (select * from a)
+GROUP BY 1 ORDER BY 1;
+ value_1 | count
+---------------------------------------------------------------------
+ 2 | 5
+(1 row)
+
-- clean up tables, ...
SET client_min_messages TO ERROR;
DROP SEQUENCE example_ref_value_seq;

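-- Editor's note (not part of the diff): a hedged variant of the new test
-- above. Reference tables are replicated to every node, so a CTE over one can
-- be joined with a distributed table without repartitioning:
WITH r AS (SELECT value_2 FROM reference_table_test)
SELECT count(*) FROM colocated_table_test ct JOIN r ON (ct.value_1 = r.value_2);
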
@ -228,7 +228,7 @@ INSERT INTO tt1 VALUES (1,11), (3,15), (5,17), (6,19), (8,17), (2,12);
SELECT create_distributed_table('tt1','id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.tt1_1120$$)
 create_distributed_table
---------------------------------------------------------------------

@ -345,7 +345,7 @@ INSERT INTO test_table_1 VALUES(3, '2111-01-12 08:35:19', 9);
SELECT create_distributed_table('test_table_1', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test_table_1$$)
 create_distributed_table
---------------------------------------------------------------------

@ -800,7 +800,7 @@ INSERT INTO test_table_2 VALUES(3, random());
SELECT create_distributed_table('test_table_2', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.test_table_2$$)
 create_distributed_table
---------------------------------------------------------------------

@ -308,7 +308,7 @@ INSERT INTO test_local_truncate VALUES (1,2);
SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_truncate.test_local_truncate$$)
 create_distributed_table
---------------------------------------------------------------------

@ -347,7 +347,7 @@ INSERT INTO test_local_truncate VALUES (1,2);
SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_truncate.test_local_truncate$$)
 create_distributed_table
---------------------------------------------------------------------

@ -395,7 +395,7 @@ DETAIL: This UDF only truncates local records of distributed tables.
SELECT create_distributed_table('referenced_table', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_truncate.referenced_table$$)
 create_distributed_table
---------------------------------------------------------------------

@ -410,7 +410,7 @@ DETAIL: Table "referencing_table" references "referenced_table"
SELECT create_distributed_table('referencing_table', 'ref_id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_truncate.referencing_table$$)
 create_distributed_table
---------------------------------------------------------------------

@ -485,7 +485,7 @@ INSERT INTO ref SELECT x,x FROM generate_series(1,10000) x;
SELECT create_reference_table('ref');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_truncate.ref$$)
 create_reference_table
---------------------------------------------------------------------

@ -525,7 +525,7 @@ INSERT INTO t1 VALUES(1,1);
SELECT create_distributed_table('t1', 'a');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_truncate.t1$$)
 create_distributed_table
---------------------------------------------------------------------

@ -37,7 +37,7 @@ insert into gen2 (id, val1) values (1,4),(3,6),(5,2),(7,2);
select create_distributed_table('gen1', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_pg12.gen1$$)
 create_distributed_table
---------------------------------------------------------------------

@ -47,6 +47,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
select create_distributed_table('gen2', 'val2');
ERROR: cannot distribute relation: gen2
DETAIL: Distribution column must not use GENERATED ALWAYS AS (...) STORED.
+copy gen1 to :'temp_dir''pg12_copy_test_generated';
insert into gen1 (id, val1) values (2,4),(4,6),(6,2),(8,2);
insert into gen2 (id, val1) values (2,4),(4,6),(6,2),(8,2);
select * from gen1 order by 1,2,3;

@ -75,6 +76,17 @@ select * from gen2 order by 1,2,3;
 8 | 2 | 4
(8 rows)

+truncate gen1;
+copy gen1 from :'temp_dir''pg12_copy_test_generated';
+select * from gen1 order by 1,2,3;
+ id | val2 | val1
+---------------------------------------------------------------------
+ 1 | 6 | 4
+ 3 | 8 | 6
+ 5 | 4 | 2
+ 7 | 4 | 2
+(4 rows)
+
-- Test new VACUUM/ANALYZE options
analyze (skip_locked) gen1;
vacuum (skip_locked) gen1;

@ -271,7 +283,7 @@ DETAIL: Key (key, collection_id)=(1, 1000) is not present in table "collections
SELECT create_distributed_table('collections_list', 'key');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_pg12.collections_list_0$$)
 create_distributed_table
---------------------------------------------------------------------

@ -281,7 +293,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
SELECT create_distributed_table('collection_users', 'key');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_pg12.collection_users$$)
 create_distributed_table
---------------------------------------------------------------------

@ -302,7 +314,7 @@ INSERT INTO test (x,y) SELECT i,i*3 from generate_series(1, 100) i;
SELECT create_distributed_table('test', 'x');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_pg12.test$$)
 create_distributed_table
---------------------------------------------------------------------

@ -392,7 +404,7 @@ ERROR: Hash distributed partition columns may not use a non deterministic colla
select create_distributed_table('col_test', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_pg12.col_test$$)
 create_distributed_table
---------------------------------------------------------------------

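-- Editor's note (not part of the diff): a minimal sketch of the rule the
-- gen1/gen2 tests exercise above; the table name is hypothetical. Generated
-- columns are fine on a distributed table as long as the distribution column
-- itself is not generated:
CREATE TABLE gen_sketch (
    id int,                                         -- plain column: valid distribution key
    val1 int,
    val2 int GENERATED ALWAYS AS (val1 + 2) STORED  -- generated: not a valid distribution key
);
SELECT create_distributed_table('gen_sketch', 'id');
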
@ -59,7 +59,7 @@ drop cascades to table "weird.table"
SELECT create_distributed_table('data', 'key');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$dumper.data$$)
 create_distributed_table
---------------------------------------------------------------------

@ -69,7 +69,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
SELECT create_distributed_table('"weird.table"', 'key,');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$dumper."weird.table"$$)
 create_distributed_table
---------------------------------------------------------------------

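-- Editor's note (not part of the diff): a hedged sketch of the quoting the
-- dumper tests rely on; the table here already exists in the test, so this
-- re-creation is illustrative only. The dotted table name is double-quoted,
-- and the column literally named "key," is passed through verbatim:
CREATE TABLE "weird.table" ("key," int);
SELECT create_distributed_table('"weird.table"', 'key,');
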
@ -972,7 +972,7 @@ BEGIN;
SELECT create_distributed_table('table_3', 'key');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$access_tracking.table_3$$)
 create_distributed_table
---------------------------------------------------------------------

@ -28,11 +28,11 @@ INSERT INTO collections (key, ts, collection_id, value) VALUES (4, '2009-01-01',
SELECT create_distributed_table('collections', 'key');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$partitioned_table_replicated.collections_1$$)
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$partitioned_table_replicated.collections_2$$)
 create_distributed_table
---------------------------------------------------------------------

@ -48,7 +48,7 @@ INSERT INTO collections_4 SELECT i, '2009-01-01', 4, i FROM generate_series (0,
ALTER TABLE collections ATTACH PARTITION collections_4 FOR VALUES IN ( 4 );
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$partitioned_table_replicated.collections_4$$)
-- finally attach a distributed table to a distributed table
CREATE TABLE collections_5 AS SELECT * FROM collections LIMIT 0;

@ -294,7 +294,7 @@ INSERT INTO test_seq_truncate SELECT i FROM generate_series(0, 100) i;
SELECT create_distributed_table('test_seq_truncate', 'a');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$test_seq_ddl.test_seq_truncate$$)
 create_distributed_table
---------------------------------------------------------------------

@ -0,0 +1,151 @@
+-- setup schema used for sqlsmith runs
+-- source: https://gist.github.com/will/e8a1e6efd46ac82f1b61d0c0ccab1b52
+CREATE SCHEMA sqlsmith_failures;
+SET search_path TO sqlsmith_failures, public;
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 1280000;
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
+\gset
+begin;
+SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
+create table countries(
+    id serial primary key
+    , name text
+    , code varchar(2) collate "C" unique
+);
+insert into countries(name, code) select 'country-'||i, i::text from generate_series(10,99) i;
+select create_reference_table('countries');
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$sqlsmith_failures.countries$$)
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+create table orgs (
+    id bigserial primary key
+    , name text
+    , created_at timestamptz default now()
+);
+select create_distributed_table('orgs', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+\if :server_version_above_eleven
+-- pg12 and above support generated columns
+create table users (
+    id bigserial
+    , org_id bigint references orgs(id)
+    , name text
+    , created_at timestamptz default now()
+    , country_id int -- references countries(id)
+    , score bigint generated always as (id + country_id) stored
+    , primary key (org_id, id)
+);
+\else
+-- pg11 and below don't have generated columns, use a normal column
+create table users (
+    id bigserial
+    , org_id bigint references orgs(id)
+    , name text
+    , created_at timestamptz default now()
+    , country_id int -- references countries(id)
+    , score bigint
+    , primary key (org_id, id)
+);
+\endif
+select create_distributed_table('users', 'org_id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+alter table users add constraint fk_user_country foreign key (country_id) references countries(id);
+create table orders (
+    id bigserial
+    , org_id bigint references orgs(id)
+    , user_id bigint
+    , price int
+    , info jsonb
+    , primary key (org_id, id)
+    , foreign key (org_id, user_id) references users(org_id, id)
+);
+select create_distributed_table('orders', 'org_id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+create table events (
+    id bigserial not null
+    , user_id bigint not null
+    , org_id bigint not null
+    , event_time timestamp not null default now()
+    , event_type int not null default 0
+    , payload jsonb
+    , primary key (user_id, id)
+);
+create index event_time_idx on events using BRIN (event_time);
+create index event_json_idx on events using gin(payload);
+select create_distributed_table('events', 'user_id'); -- on purpose, don't colocate correctly on org_id
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+create table local_data(
+    id bigserial primary key
+    , val int default ( (random()*100)::int )
+);
+-- data loading takes ~30 seconds; let's hope we can skip this for all reproductions. When
+-- there is a sqlsmith failure that needs the data we can uncomment the block below.
+-- insert into orgs(id, name) select i,'org-'||i from generate_series(1,1000) i;
+-- insert into users(id, name, org_id, country_id) select i,'user-'||i, i%1000+1, (i%90)+1 from generate_series(1,100000) i;
+-- insert into orders(id, org_id, user_id, price) select i, ((i%100000+1)%1000)+1 , i%100000+1, i/100 from generate_series(1,1000000) i;
+-- insert into events(id, org_id, user_id, event_type) select i, ((i%100000+1)%1000)+1 , i%100000+1, i/100 from generate_series(1,1000000) i;
+-- insert into local_data(id) select generate_series(1,1000);
+commit;
+-- SQL SMITH ASSERTION FAILURE https://github.com/citusdata/citus/issues/3809
+-- Root cause: pruned worker columns not projected correctly on coordinator, causing an assertion in the postgres standard planner
+select
+    case when pg_catalog.bit_or(cast(cast(coalesce(cast(null as "bit"), cast(null as "bit")) as "bit") as "bit")) over (partition by subq_0.c3 order by subq_0.c0) <> cast(null as "bit")
+        then subq_0.c3
+        else subq_0.c3
+    end as c0,
+    30 as c1,
+    subq_0.c2 as c2
+from
+    (select
+        pg_catalog.websearch_to_tsquery(
+            cast(pg_catalog.regconfigin(cast(cast(null as cstring) as cstring)) as regconfig),
+            cast((select type from citus.pg_dist_object limit 1 offset 1) as text)
+        ) as c0,
+        sample_0.org_id as c1,
+        sample_0.id as c2,
+        sample_0.score as c3,
+        sample_0.country_id as c4,
+        sample_0.org_id as c5,
+        sample_0.org_id as c6
+    from
+        sqlsmith_failures.users as sample_0 tablesample system (7.5)
+    where sample_0.org_id is not NULL) as subq_0
+where (select pg_catalog.array_agg(id) from sqlsmith_failures.countries)
+    is not NULL;
+ c0 | c1 | c2
+---------------------------------------------------------------------
+(0 rows)
+
+-- cleanup
+DROP SCHEMA sqlsmith_failures CASCADE;
+NOTICE: drop cascades to 6 other objects
+DETAIL: drop cascades to table countries
+drop cascades to table orgs
+drop cascades to table users
+drop cascades to table orders
+drop cascades to table events
+drop cascades to table local_data

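-- Editor's note (not part of the diff): the schema above deliberately
-- distributes events by user_id instead of org_id. As a hedged sketch with a
-- hypothetical table, colocation can instead be requested explicitly when the
-- distribution column types match:
CREATE TABLE events_by_org (org_id bigint, payload jsonb);
SELECT create_distributed_table('events_by_org', 'org_id', colocate_with => 'orgs');
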
@ -509,6 +509,62 @@ GROUP BY a.key ORDER BY 3, 2, 1;
 key-1 | (key-1,value-2,"Wed Jan 01 00:00:00 2020") | 1
(2 rows)

+-- Of the target list entries, v1-v3 should be wrapped in any_value as they do
+-- not appear in GROUP BY. The append happens on the coordinator in that case.
+-- Vars in the HAVING that do not appear in the GROUP BY are also wrapped.
+SELECT
+    a.key as k1,
+    a.key as k2,
+    a.key || '_append' as k3,
+    a.value as v1,
+    a.value as v2,
+    a.value || '_notgrouped' as v3,
+    a.value || '_append' as va1,
+    a.value || '_append' as va2,
+    a.value || '_append' || '_more' as va2,
+    count(*)
+FROM items a LEFT JOIN other_items b ON (a.key = b.key)
+GROUP BY a.key, a.value ||'_append'
+HAVING length(a.key) + length(a.value) < length(a.value || '_append')
+ORDER BY 1;
+ k1 | k2 | k3 | v1 | v2 | v3 | va1 | va2 | va2 | count
+---------------------------------------------------------------------
+ key-1 | key-1 | key-1_append | value-2 | value-2 | value-2_notgrouped | value-2_append | value-2_append | value-2_append_more | 1
+ key-2 | key-2 | key-2_append | value-1 | value-1 | value-1_notgrouped | value-1_append | value-1_append | value-1_append_more | 1
+(2 rows)
+
+SELECT coordinator_plan($$
+EXPLAIN (VERBOSE ON, COSTS OFF)
+SELECT
+    a.key as k1,
+    a.key as k2,
+    a.key || '_append' as k3,
+    a.value as v1,
+    a.value as v2,
+    a.value || '_notgrouped' as v3,
+    a.value || '_append' as va1,
+    a.value || '_append' as va2,
+    a.value || '_append' || '_more' as va3,
+    count(*)
+FROM items a LEFT JOIN other_items b ON (a.key = b.key)
+GROUP BY a.key, a.value ||'_append'
+HAVING length(a.key) + length(a.value) < length(a.value || '_append')
+ORDER BY 1
+$$);
+ coordinator_plan
+---------------------------------------------------------------------
+ Sort
+   Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, (any_value(remote_scan.v1)), (any_value(remote_scan.v2)), ((any_value(remote_scan.v3) || '_notgrouped'::text)), remote_scan.va1, remote_scan.va2, remote_scan.va3, (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint))
+   Sort Key: remote_scan.k1
+   ->  HashAggregate
+         Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, any_value(remote_scan.v1), any_value(remote_scan.v2), (any_value(remote_scan.v3) || '_notgrouped'::text), remote_scan.va1, remote_scan.va2, remote_scan.va3, COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)
+         Group Key: remote_scan.k1, remote_scan.va1
+         Filter: ((length(remote_scan.worker_column_11) + length(any_value(remote_scan.worker_column_12))) < length((any_value(remote_scan.worker_column_13) || '_append'::text)))
+         ->  Custom Scan (Citus Adaptive)
+               Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, remote_scan.v1, remote_scan.v2, remote_scan.v3, remote_scan.va1, remote_scan.va2, remote_scan.va3, remote_scan.count, remote_scan.worker_column_11, remote_scan.worker_column_12, remote_scan.worker_column_13
+               Task Count: 4
+(10 rows)
+
SELECT a FROM items a ORDER BY key;
 a
---------------------------------------------------------------------

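-- Editor's note (not part of the diff): a hedged reduction of the wrapping
-- rule tested above, assuming items.key is the primary key (which is what
-- makes selecting a.value without grouping on it legal). The worker queries
-- compute any_value(value); the coordinator merges the per-group results:
SELECT a.key, a.value, count(*)
FROM items a
GROUP BY a.key
ORDER BY 1;
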
@ -19,11 +19,11 @@ SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('partitioning_test', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$subquery_and_partitioning.partitioning_test_2010$$)
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
-DETAIL: The local data in the table is longer visible, but is still on disk.
+DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$subquery_and_partitioning.partitioning_test_2017$$)
 create_distributed_table
---------------------------------------------------------------------

@ -0,0 +1,651 @@
+--
+-- TDIGEST_AGGREGATE_SUPPORT
+-- test the integration of github.com/tvondra/tdigest aggregates into the citus planner,
+-- which pushes down parts of the aggregate to get parallelized execution and reduced
+-- data transfer sizes for aggregates not grouped by the distribution column
+--
+SET citus.next_shard_id TO 20070000;
+CREATE SCHEMA tdigest_aggregate_support;
+SET search_path TO tdigest_aggregate_support, public;
+-- create the tdigest extension when it is installed
+SELECT CASE WHEN COUNT(*) > 0
+    THEN 'CREATE EXTENSION tdigest WITH SCHEMA public'
+    ELSE 'SELECT false AS tdigest_present' END
+AS create_cmd FROM pg_available_extensions()
+WHERE name = 'tdigest'
+\gset
+:create_cmd;
+SET citus.shard_count TO 4;
+SET citus.coordinator_aggregation_strategy TO 'disabled'; -- prevent aggregate execution when the aggregate can't be pushed down
+CREATE TABLE latencies (a int, b int, latency double precision);
+SELECT create_distributed_table('latencies', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT setseed(0.42); -- make the random data inserted deterministic
+ setseed
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO latencies
+SELECT (random()*20)::int AS a,
+    (random()*20)::int AS b,
+    random()*10000.0 AS latency
+FROM generate_series(1, 10000);
+-- explain no grouping to verify partial push down for tdigest(value, compression)
+EXPLAIN (COSTS OFF, VERBOSE)
+SELECT tdigest(latency, 100)
+FROM latencies;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate
+   Output: tdigest(remote_scan.tdigest)
+   ->  Custom Scan (Citus Adaptive)
+         Output: remote_scan.tdigest
+         Task Count: 4
+         Tasks Shown: One of 4
+         ->  Task
+               Query: SELECT public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
+               Node: host=localhost port=xxxxx dbname=regression
+               ->  Aggregate
+                     Output: tdigest(latency, 100)
+                     ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
+                           Output: a, b, latency
+(13 rows)
+
+-- explain grouping by distribution column is completely pushed down for tdigest(value, compression)
+EXPLAIN (COSTS OFF, VERBOSE)
+SELECT a, tdigest(latency, 100)
+FROM latencies
+GROUP BY a;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (Citus Adaptive)
+   Output: remote_scan.a, remote_scan.tdigest
+   Task Count: 4
+   Tasks Shown: One of 4
+   ->  Task
+         Query: SELECT a, public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
+         Node: host=localhost port=xxxxx dbname=regression
+         ->  HashAggregate
+               Output: a, tdigest(latency, 100)
+               Group Key: latencies.a
+               ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
+                     Output: a, b, latency
+(12 rows)
+
+-- explain grouping by non-distribution column is partially pushed down for tdigest(value, compression)
+EXPLAIN (COSTS OFF, VERBOSE)
+SELECT b, tdigest(latency, 100)
+FROM latencies
+GROUP BY b;
+ QUERY PLAN
+---------------------------------------------------------------------
+ HashAggregate
+   Output: remote_scan.b, tdigest(remote_scan.tdigest)
+   Group Key: remote_scan.b
+   ->  Custom Scan (Citus Adaptive)
+         Output: remote_scan.b, remote_scan.tdigest
+         Task Count: 4
+         Tasks Shown: One of 4
+         ->  Task
+               Query: SELECT b, public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
+               Node: host=localhost port=xxxxx dbname=regression
+               ->  HashAggregate
+                     Output: b, tdigest(latency, 100)
+                     Group Key: latencies.b
+                     ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
+                           Output: a, b, latency
+(15 rows)
+
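-- Editor's note (not part of the diff): the EXPLAIN blocks above and below
-- verify plan shape only. A hedged usage example against the same test table,
-- returning actual numbers instead of a plan:
SELECT b, tdigest_percentile(latency, 100, 0.99) AS p99_latency
FROM latencies
GROUP BY b
ORDER BY b;
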
-- explain no grouping to verify partially pushed down for tdigest_precentile(value, compression, quantile)
|
||||
EXPLAIN (COSTS OFF, VERBOSE)
|
||||
SELECT tdigest_percentile(latency, 100, 0.99)
|
||||
FROM latencies;
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Aggregate
|
||||
Output: tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
|
||||
-> Custom Scan (Citus Adaptive)
|
||||
Output: remote_scan.tdigest_percentile
|
||||
Task Count: 4
|
||||
Tasks Shown: One of 4
|
||||
-> Task
|
||||
Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
|
||||
Node: host=localhost port=xxxxx dbname=regression
|
||||
-> Aggregate
|
||||
Output: tdigest(latency, 100)
|
||||
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
|
||||
Output: a, b, latency
|
||||
(13 rows)

-- explain grouping by distribution column is completely pushed down for tdigest_percentile(value, compression, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(latency, 100, 0.99)
FROM latencies
GROUP BY a;
 QUERY PLAN
---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
   Output: remote_scan.a, remote_scan.tdigest_percentile
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile(latency, 100, '0.99'::double precision) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
               Output: a, tdigest_percentile(latency, 100, '0.99'::double precision)
               Group Key: latencies.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                     Output: a, b, latency
(12 rows)

-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile(value, compression, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile(latency, 100, 0.99)
FROM latencies
GROUP BY b;
 QUERY PLAN
---------------------------------------------------------------------
 HashAggregate
   Output: remote_scan.b, tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
   Group Key: remote_scan.b
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.b, remote_scan.tdigest_percentile
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
               Node: host=localhost port=xxxxx dbname=regression
               ->  HashAggregate
                     Output: b, tdigest(latency, 100)
                     Group Key: latencies.b
                     ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                           Output: a, b, latency
(15 rows)

-- explain no grouping to verify partially pushed down for tdigest_percentile(value, compression, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(latency, 100, ARRAY[0.99, 0.95])
FROM latencies;
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   Output: tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.tdigest_percentile
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     Output: tdigest(latency, 100)
                     ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                           Output: a, b, latency
(13 rows)

-- explain grouping by distribution column is completely pushed down for tdigest_percentile(value, compression, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(latency, 100, ARRAY[0.99, 0.95])
FROM latencies
GROUP BY a;
 QUERY PLAN
---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
   Output: remote_scan.a, remote_scan.tdigest_percentile
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile(latency, 100, '{0.99,0.95}'::double precision[]) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
               Output: a, tdigest_percentile(latency, 100, '{0.99,0.95}'::double precision[])
               Group Key: latencies.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                     Output: a, b, latency
(12 rows)

-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile(value, compression, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile(latency, 100, ARRAY[0.99, 0.95])
FROM latencies
GROUP BY b;
 QUERY PLAN
---------------------------------------------------------------------
 HashAggregate
   Output: remote_scan.b, tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
   Group Key: remote_scan.b
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.b, remote_scan.tdigest_percentile
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
               Node: host=localhost port=xxxxx dbname=regression
               ->  HashAggregate
                     Output: b, tdigest(latency, 100)
                     Group Key: latencies.b
                     ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                           Output: a, b, latency
(15 rows)

-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(latency, 100, 9000)
FROM latencies;
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.tdigest_percentile_of
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     Output: tdigest(latency, 100)
                     ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                           Output: a, b, latency
(13 rows)

-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(latency, 100, 9000)
FROM latencies
GROUP BY a;
 QUERY PLAN
---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
   Output: remote_scan.a, remote_scan.tdigest_percentile_of
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile_of(latency, 100, '9000'::double precision) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
               Output: a, tdigest_percentile_of(latency, 100, '9000'::double precision)
               Group Key: latencies.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                     Output: a, b, latency
(12 rows)

-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile_of(latency, 100, 9000)
FROM latencies
GROUP BY b;
 QUERY PLAN
---------------------------------------------------------------------
 HashAggregate
   Output: remote_scan.b, tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
   Group Key: remote_scan.b
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.b, remote_scan.tdigest_percentile_of
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
               Node: host=localhost port=xxxxx dbname=regression
               ->  HashAggregate
                     Output: b, tdigest(latency, 100)
                     Group Key: latencies.b
                     ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                           Output: a, b, latency
(15 rows)

-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(latency, 100, ARRAY[9000, 9500])
FROM latencies;
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.tdigest_percentile_of
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     Output: tdigest(latency, 100)
                     ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                           Output: a, b, latency
(13 rows)

-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(latency, 100, ARRAY[9000, 9500])
FROM latencies
GROUP BY a;
 QUERY PLAN
---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
   Output: remote_scan.a, remote_scan.tdigest_percentile_of
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile_of(latency, 100, '{9000,9500}'::double precision[]) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
               Output: a, tdigest_percentile_of(latency, 100, '{9000,9500}'::double precision[])
               Group Key: latencies.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                     Output: a, b, latency
(12 rows)

-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile_of(latency, 100, ARRAY[9000, 9500])
FROM latencies
GROUP BY b;
 QUERY PLAN
---------------------------------------------------------------------
 HashAggregate
   Output: remote_scan.b, tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
   Group Key: remote_scan.b
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.b, remote_scan.tdigest_percentile_of
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
               Node: host=localhost port=xxxxx dbname=regression
               ->  HashAggregate
                     Output: b, tdigest(latency, 100)
                     Group Key: latencies.b
                     ->  Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
                           Output: a, b, latency
(15 rows)

-- verifying results - these should be stable thanks to the seed used while inserting the data; if they fail because of data changes, the queries could be removed or changed to check ranges
SELECT tdigest(latency, 100) FROM latencies;
 tdigest
---------------------------------------------------------------------
 flags 0 count 10000 compression 100 centroids 46 (0.287235, 1) (1.025106, 1) (2.058216, 1) (5.335597, 1) (12.707263, 2) (25.302479, 3) (43.435063, 4) (77.987860, 5) (269.478664, 10) (509.417419, 13) (1227.158879, 22) (3408.256171, 35) (7772.721988, 55) (13840.275516, 65) (32937.127607, 108) (64476.403332, 148) (118260.230644, 199) (239584.293240, 292) (562119.836766, 463) (944722.686313, 547) (1751089.620493, 749) (3751264.745959, 1128) (5877270.108576, 1300) (6224557.402567, 1104) (5804999.258033, 874) (5632316.697114, 755) (4648651.050740, 573) (3460055.227950, 402) (2820271.404686, 314) (2676501.012955, 288) (1649845.166017, 173) (1269335.942008, 131) (813964.853243, 83) (484144.878702, 49) (337179.763016, 34) (198775.241901, 20) (149353.499704, 15) (109688.319223, 11) (79855.926155, 8) (49937.731689, 5) (29971.046175, 3) (19982.538737, 2) (9991.467422, 1) (9992.337047, 1) (9995.578357, 1) (9999.700339, 1)
(1 row)

SELECT tdigest_percentile(latency, 100, 0.99) FROM latencies;
 tdigest_percentile
---------------------------------------------------------------------
 9904.28342426494
(1 row)

SELECT tdigest_percentile(latency, 100, ARRAY[0.99, 0.95]) FROM latencies;
 tdigest_percentile
---------------------------------------------------------------------
 {9904.28342426494,9485.49009399385}
(1 row)

SELECT tdigest_percentile_of(latency, 100, 9000) FROM latencies;
 tdigest_percentile_of
---------------------------------------------------------------------
 0.903462047211138
(1 row)

SELECT tdigest_percentile_of(latency, 100, ARRAY[9000, 9500]) FROM latencies;
 tdigest_percentile_of
---------------------------------------------------------------------
 {0.903462047211138,0.95137481812975}
(1 row)
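-- Editorial note: latency is drawn uniformly from (0, 10000), so the exact
-- fraction of values below 9000 is about 0.90 and below 9500 about 0.95; the
-- digest estimates above (0.9035 and 0.9514) are within the expected sketch
-- error, and likewise the 0.99/0.95 percentile estimates land near 9900/9500.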

CREATE TABLE latencies_rollup (a int, tdigest tdigest);
SELECT create_distributed_table('latencies_rollup', 'a', colocate_with => 'latencies');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO latencies_rollup
SELECT a, tdigest(latency, 100)
FROM latencies
GROUP BY a;
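-- Editorial note: this is the rollup pattern; one pre-aggregated digest is
-- stored per distribution key, and tdigest(tdigest) merges stored digests, so
-- full-table percentiles can later be answered from the much smaller rollup
-- table alone, as the queries below do.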
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest(tdigest)
FROM latencies_rollup;
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   Output: tdigest(remote_scan.tdigest)
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.tdigest
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(tdigest) AS tdigest FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     Output: tdigest(tdigest)
                     ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                           Output: a, tdigest
(13 rows)

-- explain grouping by distribution column is completely pushed down for tdigest(tdigest)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest(tdigest)
FROM latencies_rollup
GROUP BY a;
 QUERY PLAN
---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
   Output: remote_scan.a, remote_scan.tdigest
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest(tdigest) AS tdigest FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
               Output: a, tdigest(tdigest)
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                     Output: a, tdigest
(12 rows)

-- explain no grouping to verify partially pushed down for tdigest_percentile(tdigest, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(tdigest, 0.99)
FROM latencies_rollup;
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   Output: tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.tdigest_percentile
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(tdigest) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     Output: tdigest(tdigest)
                     ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                           Output: a, tdigest
(13 rows)

-- explain grouping by distribution column is completely pushed down for tdigest_percentile(tdigest, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(tdigest, 0.99)
FROM latencies_rollup
GROUP BY a;
 QUERY PLAN
---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
   Output: remote_scan.a, remote_scan.tdigest_percentile
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile(tdigest, '0.99'::double precision) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
               Output: a, tdigest_percentile(tdigest, '0.99'::double precision)
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                     Output: a, tdigest
(12 rows)

-- explain no grouping to verify partially pushed down for tdigest_percentile(tdigest, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(tdigest, ARRAY[0.99, 0.95])
FROM latencies_rollup;
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   Output: tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.tdigest_percentile
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(tdigest) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     Output: tdigest(tdigest)
                     ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                           Output: a, tdigest
(13 rows)

-- explain grouping by distribution column is completely pushed down for tdigest_percentile(tdigest, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(tdigest, ARRAY[0.99, 0.95])
FROM latencies_rollup
GROUP BY a;
 QUERY PLAN
---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
   Output: remote_scan.a, remote_scan.tdigest_percentile
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile(tdigest, '{0.99,0.95}'::double precision[]) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
               Output: a, tdigest_percentile(tdigest, '{0.99,0.95}'::double precision[])
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                     Output: a, tdigest
(12 rows)

-- explain no grouping to verify partially pushed down for tdigest_percentile_of(tdigest, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(tdigest, 9000)
FROM latencies_rollup;
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.tdigest_percentile_of
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(tdigest) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     Output: tdigest(tdigest)
                     ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                           Output: a, tdigest
(13 rows)

-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(tdigest, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(tdigest, 9000)
FROM latencies_rollup
GROUP BY a;
 QUERY PLAN
---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
   Output: remote_scan.a, remote_scan.tdigest_percentile_of
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile_of(tdigest, '9000'::double precision) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
               Output: a, tdigest_percentile_of(tdigest, '9000'::double precision)
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                     Output: a, tdigest
(12 rows)

-- explain no grouping to verify partially pushed down for tdigest_percentile_of(tdigest, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(tdigest, ARRAY[9000, 9500])
FROM latencies_rollup;
 QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
   ->  Custom Scan (Citus Adaptive)
         Output: remote_scan.tdigest_percentile_of
         Task Count: 4
         Tasks Shown: One of 4
         ->  Task
               Query: SELECT public.tdigest(tdigest) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     Output: tdigest(tdigest)
                     ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                           Output: a, tdigest
(13 rows)

-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(tdigest, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(tdigest, ARRAY[9000, 9500])
FROM latencies_rollup
GROUP BY a;
 QUERY PLAN
---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
   Output: remote_scan.a, remote_scan.tdigest_percentile_of
   Task Count: 4
   Tasks Shown: One of 4
   ->  Task
         Query: SELECT a, public.tdigest_percentile_of(tdigest, '{9000,9500}'::double precision[]) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
         Node: host=localhost port=xxxxx dbname=regression
         ->  HashAggregate
               Output: a, tdigest_percentile_of(tdigest, '{9000,9500}'::double precision[])
               Group Key: latencies_rollup.a
               ->  Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
                     Output: a, tdigest
(12 rows)

-- verifying results - these should be stable thanks to the seed used while inserting the data; if they fail because of data changes, the queries could be removed or changed to check ranges
SELECT tdigest(tdigest) FROM latencies_rollup;
 tdigest
---------------------------------------------------------------------
 flags 0 count 10000 compression 100 centroids 47 (0.287235, 1) (1.025106, 1) (2.058216, 1) (5.335597, 1) (12.707263, 2) (25.302479, 3) (43.435063, 4) (77.987860, 5) (241.681030, 9) (402.696604, 11) (999.675875, 20) (2310.848640, 27) (4374.387978, 37) (9722.896547, 56) (21713.805492, 87) (39735.065966, 112) (87335.860853, 177) (182744.906162, 262) (336766.886786, 338) (661263.339724, 464) (1228663.222377, 623) (2146097.038498, 805) (2854487.701653, 827) (5292830.156590, 1195) (6168185.834602, 1104) (6399734.303813, 966) (5778088.854724, 773) (5213381.984997, 637) (3763042.148296, 431) (3036786.646485, 333) (1948238.134602, 207) (1456568.605821, 152) (999888.715345, 103) (715935.892988, 73) (543464.906535, 55) (327339.982973, 33) (198853.838033, 20) (159362.743852, 16) (79807.827301, 8) (69877.414438, 7) (49937.731689, 5) (29971.046175, 3) (19982.538737, 2) (9991.467422, 1) (9992.337047, 1) (9995.578357, 1) (9999.700339, 1)
(1 row)

SELECT tdigest_percentile(tdigest, 0.99) FROM latencies_rollup;
 tdigest_percentile
---------------------------------------------------------------------
 9903.76070790358
(1 row)

SELECT tdigest_percentile(tdigest, ARRAY[0.99, 0.95]) FROM latencies_rollup;
 tdigest_percentile
---------------------------------------------------------------------
 {9903.76070790358,9492.7106302226}
(1 row)

SELECT tdigest_percentile_of(tdigest, 9000) FROM latencies_rollup;
 tdigest_percentile_of
---------------------------------------------------------------------
 0.902852659582396
(1 row)

SELECT tdigest_percentile_of(tdigest, ARRAY[9000, 9500]) FROM latencies_rollup;
 tdigest_percentile_of
---------------------------------------------------------------------
 {0.902852659582396,0.950865574659141}
(1 row)

SET client_min_messages TO WARNING; -- suppress cascade messages
DROP SCHEMA tdigest_aggregate_support CASCADE;

@ -0,0 +1,234 @@
--
-- TDIGEST_AGGREGATE_SUPPORT
-- test the integration of github.com/tvondra/tdigest aggregates into the citus planner,
-- which pushes down parts of the aggregate for parallelized execution and reduced
-- data transfer for aggregates not grouped by the distribution column
--
SET citus.next_shard_id TO 20070000;
CREATE SCHEMA tdigest_aggregate_support;
SET search_path TO tdigest_aggregate_support, public;
-- create the tdigest extension when installed
SELECT CASE WHEN COUNT(*) > 0
    THEN 'CREATE EXTENSION tdigest WITH SCHEMA public'
    ELSE 'SELECT false AS tdigest_present' END
AS create_cmd FROM pg_available_extensions()
WHERE name = 'tdigest'
\gset
:create_cmd;
 tdigest_present
---------------------------------------------------------------------
 f
(1 row)
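-- Editorial note: the CASE query above builds a command string and \gset saves
-- it into the psql variable :create_cmd, so executing :create_cmd either
-- creates the tdigest extension or runs the fallback SELECT. The 'f' result
-- here means tdigest is not installed, which is why this variant of the
-- expected output anticipates an ERROR for every tdigest call below.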

SET citus.shard_count TO 4;
SET citus.coordinator_aggregation_strategy TO 'disabled'; -- prevent aggregate execution when the aggregate can't be pushed down
CREATE TABLE latencies (a int, b int, latency double precision);
SELECT create_distributed_table('latencies', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT setseed(0.42); -- make the random data inserted deterministic
 setseed
---------------------------------------------------------------------

(1 row)

INSERT INTO latencies
SELECT (random()*20)::int AS a,
       (random()*20)::int AS b,
       random()*10000.0 AS latency
FROM generate_series(1, 10000);
-- explain no grouping to verify partially pushed down for tdigest(value, compression)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest(latency, 100)
FROM latencies;
ERROR: function tdigest(double precision, integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by distribution column is completely pushed down for tdigest(value, compression)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest(latency, 100)
FROM latencies
GROUP BY a;
ERROR: function tdigest(double precision, integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by non-distribution column is partially pushed down for tdigest(value, compression)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest(latency, 100)
FROM latencies
GROUP BY b;
ERROR: function tdigest(double precision, integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain no grouping to verify partially pushed down for tdigest_percentile(value, compression, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(latency, 100, 0.99)
FROM latencies;
ERROR: function tdigest_percentile(double precision, integer, numeric) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by distribution column is completely pushed down for tdigest_percentile(value, compression, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(latency, 100, 0.99)
FROM latencies
GROUP BY a;
ERROR: function tdigest_percentile(double precision, integer, numeric) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile(value, compression, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile(latency, 100, 0.99)
FROM latencies
GROUP BY b;
ERROR: function tdigest_percentile(double precision, integer, numeric) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain no grouping to verify partially pushed down for tdigest_percentile(value, compression, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(latency, 100, ARRAY[0.99, 0.95])
FROM latencies;
ERROR: function tdigest_percentile(double precision, integer, numeric[]) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by distribution column is completely pushed down for tdigest_percentile(value, compression, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(latency, 100, ARRAY[0.99, 0.95])
FROM latencies
GROUP BY a;
ERROR: function tdigest_percentile(double precision, integer, numeric[]) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile(value, compression, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile(latency, 100, ARRAY[0.99, 0.95])
FROM latencies
GROUP BY b;
ERROR: function tdigest_percentile(double precision, integer, numeric[]) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(latency, 100, 9000)
FROM latencies;
ERROR: function tdigest_percentile_of(double precision, integer, integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(latency, 100, 9000)
FROM latencies
GROUP BY a;
ERROR: function tdigest_percentile_of(double precision, integer, integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile_of(latency, 100, 9000)
FROM latencies
GROUP BY b;
ERROR: function tdigest_percentile_of(double precision, integer, integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(latency, 100, ARRAY[9000, 9500])
FROM latencies;
ERROR: function tdigest_percentile_of(double precision, integer, integer[]) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(latency, 100, ARRAY[9000, 9500])
FROM latencies
GROUP BY a;
ERROR: function tdigest_percentile_of(double precision, integer, integer[]) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile_of(latency, 100, ARRAY[9000, 9500])
FROM latencies
GROUP BY b;
ERROR: function tdigest_percentile_of(double precision, integer, integer[]) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- verifying results - these should be stable thanks to the seed used while inserting the data; if they fail because of data changes, the queries could be removed or changed to check ranges
SELECT tdigest(latency, 100) FROM latencies;
ERROR: function tdigest(double precision, integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
SELECT tdigest_percentile(latency, 100, 0.99) FROM latencies;
ERROR: function tdigest_percentile(double precision, integer, numeric) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
SELECT tdigest_percentile(latency, 100, ARRAY[0.99, 0.95]) FROM latencies;
ERROR: function tdigest_percentile(double precision, integer, numeric[]) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
SELECT tdigest_percentile_of(latency, 100, 9000) FROM latencies;
ERROR: function tdigest_percentile_of(double precision, integer, integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
SELECT tdigest_percentile_of(latency, 100, ARRAY[9000, 9500]) FROM latencies;
ERROR: function tdigest_percentile_of(double precision, integer, integer[]) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
CREATE TABLE latencies_rollup (a int, tdigest tdigest);
ERROR: type "tdigest" does not exist
SELECT create_distributed_table('latencies_rollup', 'a', colocate_with => 'latencies');
ERROR: relation "latencies_rollup" does not exist
INSERT INTO latencies_rollup
SELECT a, tdigest(latency, 100)
FROM latencies
GROUP BY a;
ERROR: relation "latencies_rollup" does not exist
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest(tdigest)
FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
-- explain grouping by distribution column is completely pushed down for tdigest(tdigest)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest(tdigest)
FROM latencies_rollup
GROUP BY a;
ERROR: relation "latencies_rollup" does not exist
-- explain no grouping to verify partially pushed down for tdigest_percentile(tdigest, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(tdigest, 0.99)
FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
-- explain grouping by distribution column is completely pushed down for tdigest_percentile(tdigest, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(tdigest, 0.99)
FROM latencies_rollup
GROUP BY a;
ERROR: relation "latencies_rollup" does not exist
-- explain no grouping to verify partially pushed down for tdigest_percentile(tdigest, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(tdigest, ARRAY[0.99, 0.95])
FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
-- explain grouping by distribution column is completely pushed down for tdigest_percentile(tdigest, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(tdigest, ARRAY[0.99, 0.95])
FROM latencies_rollup
GROUP BY a;
ERROR: relation "latencies_rollup" does not exist
-- explain no grouping to verify partially pushed down for tdigest_percentile_of(tdigest, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(tdigest, 9000)
FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(tdigest, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(tdigest, 9000)
FROM latencies_rollup
GROUP BY a;
ERROR: relation "latencies_rollup" does not exist
-- explain no grouping to verify partially pushed down for tdigest_percentile_of(tdigest, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(tdigest, ARRAY[9000, 9500])
FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(tdigest, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(tdigest, ARRAY[9000, 9500])
FROM latencies_rollup
GROUP BY a;
ERROR: relation "latencies_rollup" does not exist
-- verifying results - these should be stable thanks to the seed used while inserting the data; if they fail because of data changes, the queries could be removed or changed to check ranges
SELECT tdigest(tdigest) FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
SELECT tdigest_percentile(tdigest, 0.99) FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
SELECT tdigest_percentile(tdigest, ARRAY[0.99, 0.95]) FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
SELECT tdigest_percentile_of(tdigest, 9000) FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
SELECT tdigest_percentile_of(tdigest, ARRAY[9000, 9500]) FROM latencies_rollup;
ERROR: relation "latencies_rollup" does not exist
SET client_min_messages TO WARNING; -- suppress cascade messages
DROP SCHEMA tdigest_aggregate_support CASCADE;

@ -208,7 +208,7 @@ SELECT * FROM t2 ORDER BY a;
SELECT create_distributed_table('t2', 'a');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is longer visible, but is still on disk.
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$upgrade_basic.t2$$)
 create_distributed_table
---------------------------------------------------------------------

@ -16,11 +16,11 @@ INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03');
SELECT create_distributed_table('with_partitioning.partitioning_test', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is longer visible, but is still on disk.
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$with_partitioning.partitioning_test_2010$$)
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is longer visible, but is still on disk.
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$with_partitioning.partitioning_test_2017$$)
 create_distributed_table
---------------------------------------------------------------------

@ -93,7 +93,7 @@ test: multi_subquery_in_where_reference_clause full_join adaptive_executor propa
test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc
test: multi_agg_distinct multi_agg_approximate_distinct multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql
test: multi_reference_table multi_select_for_update relation_access_tracking
test: custom_aggregate_support aggregate_support
test: custom_aggregate_support aggregate_support tdigest_aggregate_support
test: multi_average_expression multi_working_columns multi_having_pushdown having_subquery
test: multi_array_agg multi_limit_clause multi_orderby_limit_pushdown
test: multi_jsonb_agg multi_jsonb_object_agg multi_json_agg multi_json_object_agg bool_agg ch_bench_having chbenchmark_all_queries expression_reference_join
@ -361,3 +361,8 @@ test: ensure_no_intermediate_data_leak
# in the shared memory
# --------
test: ensure_no_shared_connection_leak

# ---------
# run queries generated by sql smith that caused issues in the past
# --------
test: sqlsmith_failures
@ -206,7 +206,7 @@ CREATE TABLE impressions (
SELECT create_distributed_table('companies', 'id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is longer visible, but is still on disk.
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.companies$$)
 create_distributed_table
--------------------------

@ -216,7 +216,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
SELECT create_distributed_table('campaigns', 'company_id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is longer visible, but is still on disk.
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.campaigns$$)
 create_distributed_table
--------------------------

@ -226,7 +226,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
SELECT create_distributed_table('ads', 'company_id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is longer visible, but is still on disk.
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.ads$$)
 create_distributed_table
--------------------------

@ -236,7 +236,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
SELECT create_distributed_table('clicks', 'company_id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is longer visible, but is still on disk.
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.clicks$$)
 create_distributed_table
--------------------------

@ -246,7 +246,7 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
SELECT create_distributed_table('impressions', 'company_id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is longer visible, but is still on disk.
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.impressions$$)
 create_distributed_table
--------------------------

@ -981,7 +981,7 @@ SELECT 1 AS a, s AS b, s AS c, s AS d, s AS e, s AS f, s AS g, s AS h FROM gener
SELECT create_distributed_table('trigger_flush','a');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is longer visible, but is still on disk.
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.trigger_flush$$)
 create_distributed_table
---------------------------------------------------------------------

@ -155,11 +155,29 @@ SELECT * FROM ref JOIN local ON (a = x);
-- in postgres we wouldn't see this modifying cte, so it is consistent with postgres.
WITH a AS (SELECT count(*) FROM test), b AS (INSERT INTO local VALUES (3,2) RETURNING *), c AS (INSERT INTO ref VALUES (3,2) RETURNING *), d AS (SELECT count(*) FROM ref JOIN local ON (a = x)) SELECT * FROM a, b, c, d ORDER BY x,y,a,b;

-- issue #3801
SET citus.shard_replication_factor TO 2;
CREATE TABLE dist_table(a int);
SELECT create_distributed_table('dist_table', 'a');
BEGIN;
-- this will use perPlacementQueryStrings, make sure it works correctly with
-- copying task
INSERT INTO dist_table SELECT a + 1 FROM dist_table;
ROLLBACK;

BEGIN;
SET citus.shard_replication_factor TO 2;
CREATE TABLE dist_table1(a int);
-- this will use queryStringList, make sure it works correctly with
-- copying task
SELECT create_distributed_table('dist_table1', 'a');
ROLLBACK;
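-- Editorial note: with citus.shard_replication_factor = 2 every task has two
-- placements, so the INSERT ... SELECT above exercises the
-- perPlacementQueryStrings path while the in-transaction
-- create_distributed_table exercises the queryStringList path, per the
-- comments above; both are regressions for issue #3801.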

RESET citus.enable_cte_inlining;

DELETE FROM test;
DROP TABLE test;
DROP TABLE dist_table;

DROP SCHEMA coordinator_shouldhaveshards CASCADE;

@ -846,7 +846,7 @@ RESET citus.log_local_commands;

\c - - - :master_port
SET citus.next_shard_id TO 1480000;
-- local execution with custom type
-- test both local and remote execution with custom type
SET citus.replication_model TO "streaming";
SET citus.shard_replication_factor TO 1;
CREATE TYPE invite_resp AS ENUM ('yes', 'no', 'maybe');
@ -860,6 +860,105 @@ CREATE TABLE event_responses (

SELECT create_distributed_table('event_responses', 'event_id');

INSERT INTO event_responses VALUES (1, 1, 'yes'), (2, 2, 'yes'), (3, 3, 'no'), (4, 4, 'no');

CREATE OR REPLACE FUNCTION regular_func(p invite_resp)
RETURNS int AS $$
DECLARE
    q1Result INT;
    q2Result INT;
    q3Result INT;
BEGIN
    SELECT count(*) INTO q1Result FROM event_responses WHERE response = $1;
    SELECT count(*) INTO q2Result FROM event_responses e1 LEFT JOIN event_responses e2 USING (event_id) WHERE e2.response = $1;
    SELECT count(*) INTO q3Result FROM (SELECT * FROM event_responses WHERE response = $1 LIMIT 5) as foo;
    RETURN q3Result+q2Result+q1Result;
END;
$$ LANGUAGE plpgsql;

SELECT regular_func('yes');
SELECT regular_func('yes');
SELECT regular_func('yes');
SELECT regular_func('yes');
SELECT regular_func('yes');
SELECT regular_func('yes');
SELECT regular_func('yes');
SELECT regular_func('yes');
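-- Editorial note: the function is deliberately called more than five times;
-- plpgsql statements switch from custom to generic cached plans after five
-- executions (the PostgreSQL default), and local execution has to keep
-- producing correct results across that switch.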

CREATE OR REPLACE PROCEDURE regular_procedure(p invite_resp)
AS $$
BEGIN
    PERFORM * FROM event_responses WHERE response = $1;
    PERFORM * FROM event_responses e1 LEFT JOIN event_responses e2 USING (event_id) WHERE e2.response = $1;
    PERFORM * FROM (SELECT * FROM event_responses WHERE response = $1 LIMIT 5) as foo;
END;
$$ LANGUAGE plpgsql;

CALL regular_procedure('no');
CALL regular_procedure('no');
CALL regular_procedure('no');
CALL regular_procedure('no');
CALL regular_procedure('no');
CALL regular_procedure('no');
CALL regular_procedure('no');

PREPARE multi_shard_no_dist_key(invite_resp) AS select * from event_responses where response = $1::invite_resp LIMIT 1;
EXECUTE multi_shard_no_dist_key('yes');
EXECUTE multi_shard_no_dist_key('yes');
EXECUTE multi_shard_no_dist_key('yes');
EXECUTE multi_shard_no_dist_key('yes');
EXECUTE multi_shard_no_dist_key('yes');
EXECUTE multi_shard_no_dist_key('yes');
EXECUTE multi_shard_no_dist_key('yes');

PREPARE multi_shard_with_dist_key(int, invite_resp) AS select * from event_responses where event_id > $1 AND response = $2::invite_resp LIMIT 1;
EXECUTE multi_shard_with_dist_key(1, 'yes');
EXECUTE multi_shard_with_dist_key(1, 'yes');
EXECUTE multi_shard_with_dist_key(1, 'yes');
EXECUTE multi_shard_with_dist_key(1, 'yes');
EXECUTE multi_shard_with_dist_key(1, 'yes');
EXECUTE multi_shard_with_dist_key(1, 'yes');
EXECUTE multi_shard_with_dist_key(1, 'yes');

PREPARE query_pushdown_no_dist_key(invite_resp) AS select * from event_responses e1 LEFT JOIN event_responses e2 USING(event_id) where e1.response = $1::invite_resp LIMIT 1;
EXECUTE query_pushdown_no_dist_key('yes');
EXECUTE query_pushdown_no_dist_key('yes');
EXECUTE query_pushdown_no_dist_key('yes');
EXECUTE query_pushdown_no_dist_key('yes');
EXECUTE query_pushdown_no_dist_key('yes');
EXECUTE query_pushdown_no_dist_key('yes');
EXECUTE query_pushdown_no_dist_key('yes');

PREPARE insert_select_via_coord(invite_resp) AS INSERT INTO event_responses SELECT * FROM event_responses where response = $1::invite_resp LIMIT 1 ON CONFLICT (event_id, user_id) DO NOTHING ;
EXECUTE insert_select_via_coord('yes');
EXECUTE insert_select_via_coord('yes');
EXECUTE insert_select_via_coord('yes');
EXECUTE insert_select_via_coord('yes');
EXECUTE insert_select_via_coord('yes');
EXECUTE insert_select_via_coord('yes');
EXECUTE insert_select_via_coord('yes');

PREPARE insert_select_pushdown(invite_resp) AS INSERT INTO event_responses SELECT * FROM event_responses where response = $1::invite_resp ON CONFLICT (event_id, user_id) DO NOTHING;
EXECUTE insert_select_pushdown('yes');
EXECUTE insert_select_pushdown('yes');
EXECUTE insert_select_pushdown('yes');
EXECUTE insert_select_pushdown('yes');
EXECUTE insert_select_pushdown('yes');
EXECUTE insert_select_pushdown('yes');
EXECUTE insert_select_pushdown('yes');

PREPARE router_select_with_no_dist_key_filter(invite_resp) AS select * from event_responses where event_id = 1 AND response = $1::invite_resp LIMIT 1;
EXECUTE router_select_with_no_dist_key_filter('yes');
EXECUTE router_select_with_no_dist_key_filter('yes');
EXECUTE router_select_with_no_dist_key_filter('yes');
EXECUTE router_select_with_no_dist_key_filter('yes');
EXECUTE router_select_with_no_dist_key_filter('yes');
EXECUTE router_select_with_no_dist_key_filter('yes');
EXECUTE router_select_with_no_dist_key_filter('yes');
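-- Editorial note: the same five-execution threshold explains why every
-- prepared statement above is executed at least six times; the sixth run is
-- the first that can hit a cached generic plan, which is where parameter
-- handling bugs in the distributed planner tend to surface.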

-- rest of the tests assume the table is empty
TRUNCATE event_responses;

CREATE OR REPLACE PROCEDURE register_for_event(p_event_id int, p_user_id int, p_choice invite_resp)
LANGUAGE plpgsql AS $fn$
BEGIN
@ -177,5 +177,46 @@ EXECUTE coerce_numeric_2(1);
EXECUTE coerce_numeric_2(1);


-- Test that we can insert an integer literal into a numeric column as well
CREATE TABLE numeric_test (id numeric(6, 1), val int);
SELECT create_distributed_table('numeric_test', 'id');

INSERT INTO numeric_test VALUES (21, 87) RETURNING *;
SELECT * FROM numeric_test WHERE id = 21;
SELECT * FROM numeric_test WHERE id = 21::int;
SELECT * FROM numeric_test WHERE id = 21::bigint;
SELECT * FROM numeric_test WHERE id = 21.0;
SELECT * FROM numeric_test WHERE id = 21.0::numeric;

PREPARE insert_p(int) AS INSERT INTO numeric_test VALUES ($1, 87) RETURNING *;
EXECUTE insert_p(1);
EXECUTE insert_p(2);
EXECUTE insert_p(3);
EXECUTE insert_p(4);
EXECUTE insert_p(5);
EXECUTE insert_p(6);

PREPARE select_p(int) AS SELECT * FROM numeric_test WHERE id=$1;
EXECUTE select_p(1);
EXECUTE select_p(2);
EXECUTE select_p(3);
EXECUTE select_p(4);
EXECUTE select_p(5);
EXECUTE select_p(6);

SET citus.enable_fast_path_router_planner TO false;
EXECUTE select_p(1);
EXECUTE select_p(2);
EXECUTE select_p(3);
EXECUTE select_p(4);
EXECUTE select_p(5);
EXECUTE select_p(6);
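-- Editorial note: the distribution column is numeric(6, 1) while the prepared
-- statement parameter is an int, so shard pruning must coerce the value;
-- disabling citus.enable_fast_path_router_planner repeats the executions
-- through the regular router planner so both planning paths are covered.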

-- make sure that we don't return wrong results
INSERT INTO numeric_test VALUES (21.1, 87) RETURNING *;
SELECT * FROM numeric_test WHERE id = 21;
SELECT * FROM numeric_test WHERE id = 21::numeric;
SELECT * FROM numeric_test WHERE id = 21.1::numeric;

SET search_path TO public;
DROP SCHEMA prune_shard_list CASCADE;
@ -1012,6 +1012,11 @@ EXPLAIN (COSTS OFF) SELECT value_1, count(*) FROM colocated_table_test GROUP BY
HAVING (SELECT rt.value_2 FROM reference_table_test rt where rt.value_2 = 2) > 0
ORDER BY 1;

WITH a as (SELECT rt.value_2 FROM reference_table_test rt where rt.value_2 = 2)
SELECT ct.value_1, count(*) FROM colocated_table_test ct join a on ct.value_1 = a.value_2
WHERE exists (select * from a)
GROUP BY 1 ORDER BY 1;

-- clean up tables, ...
SET client_min_messages TO ERROR;
DROP SEQUENCE example_ref_value_seq;
@ -43,12 +43,18 @@ insert into gen2 (id, val1) values (1,4),(3,6),(5,2),(7,2);
select create_distributed_table('gen1', 'id');
select create_distributed_table('gen2', 'val2');

copy gen1 to :'temp_dir''pg12_copy_test_generated';

insert into gen1 (id, val1) values (2,4),(4,6),(6,2),(8,2);
insert into gen2 (id, val1) values (2,4),(4,6),(6,2),(8,2);

select * from gen1 order by 1,2,3;
select * from gen2 order by 1,2,3;

truncate gen1;
copy gen1 from :'temp_dir''pg12_copy_test_generated';
select * from gen1 order by 1,2,3;
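-- Editorial note: the round trip above (COPY TO a file, TRUNCATE, COPY FROM
-- the same file) must reproduce the original rows; gen1/gen2 presumably carry
-- a stored generated column (defined earlier in this file), which is the case
-- this COPY test is guarding.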

-- Test new VACUUM/ANALYZE options
analyze (skip_locked) gen1;
vacuum (skip_locked) gen1;
@ -0,0 +1,126 @@
-- setup schema used for sqlsmith runs
-- source: https://gist.github.com/will/e8a1e6efd46ac82f1b61d0c0ccab1b52
CREATE SCHEMA sqlsmith_failures;
SET search_path TO sqlsmith_failures, public;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 1280000;

SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
\gset

begin;

SET LOCAL citus.multi_shard_modify_mode TO 'sequential';

create table countries(
    id serial primary key
    , name text
    , code varchar(2) collate "C" unique
);
insert into countries(name, code) select 'country-'||i, i::text from generate_series(10,99) i;
select create_reference_table('countries');

create table orgs (
    id bigserial primary key
    , name text
    , created_at timestamptz default now()
);
select create_distributed_table('orgs', 'id');

\if :server_version_above_eleven
-- pg12 and above support generated columns
create table users (
    id bigserial
    , org_id bigint references orgs(id)
    , name text
    , created_at timestamptz default now()
    , country_id int -- references countries(id)
    , score bigint generated always as (id + country_id) stored
    , primary key (org_id, id)
);
\else
-- pg11 and below don't have generated columns, use a normal column
create table users (
    id bigserial
    , org_id bigint references orgs(id)
    , name text
    , created_at timestamptz default now()
    , country_id int -- references countries(id)
    , score bigint
    , primary key (org_id, id)
);
\endif
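-- Editorial note: \if/\else/\endif is psql's client-side conditional; the
-- server_version_above_eleven variable captured with \gset above selects the
-- table definition with or without a stored generated column, so the same
-- script runs on both PostgreSQL 11 and 12+.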
|
||||
|
||||
select create_distributed_table('users', 'org_id');
|
||||
alter table users add constraint fk_user_country foreign key (country_id) references countries(id);
|
||||
|
||||
create table orders (
|
||||
id bigserial
|
||||
, org_id bigint references orgs(id)
|
||||
, user_id bigint
|
||||
, price int
|
||||
, info jsonb
|
||||
, primary key (org_id, id)
|
||||
, foreign key (org_id, user_id) references users(org_id, id)
|
||||
);
|
||||
select create_distributed_table('orders', 'org_id');
|
||||
|
||||
create table events (
|
||||
id bigserial not null
|
||||
, user_id bigint not null
|
||||
, org_id bigint not null
|
||||
, event_time timestamp not null default now()
|
||||
, event_type int not null default 0
|
||||
, payload jsonb
|
||||
, primary key (user_id, id)
|
||||
);
|
||||
create index event_time_idx on events using BRIN (event_time);
|
||||
create index event_json_idx on events using gin(payload);
|
||||
select create_distributed_table('events', 'user_id'); -- on purpose don't colocate correctly on org_id

create table local_data(
    id bigserial primary key
    , val int default ( (random()*100)::int )
);

-- data loading takes ~30 seconds, let's hope we can skip this for all reproductions. When
-- there is a sqlsmith failure that needs the data we can uncomment the block below.

-- insert into orgs(id, name) select i,'org-'||i from generate_series(1,1000) i;
-- insert into users(id, name, org_id, country_id) select i,'user-'||i, i%1000+1, (i%90)+1 from generate_series(1,100000) i;
-- insert into orders(id, org_id, user_id, price) select i, ((i%100000+1)%1000)+1 , i%100000+1, i/100 from generate_series(1,1000000) i;
-- insert into events(id, org_id, user_id, event_type) select i, ((i%100000+1)%1000)+1 , i%100000+1, i/100 from generate_series(1,1000000) i;
-- insert into local_data(id) select generate_series(1,1000);

commit;

-- SQL SMITH ASSERTION FAILURE https://github.com/citusdata/citus/issues/3809
-- Root cause: pruned worker columns were not projected correctly on the coordinator,
-- triggering an assertion failure in the postgres standard planner
select
    case when pg_catalog.bit_or(cast(cast(coalesce(cast(null as "bit"), cast(null as "bit")) as "bit") as "bit")) over (partition by subq_0.c3 order by subq_0.c0) <> cast(null as "bit")
        then subq_0.c3
        else subq_0.c3
    end as c0,
    30 as c1,
    subq_0.c2 as c2
from
    (select
        pg_catalog.websearch_to_tsquery(
            cast(pg_catalog.regconfigin(cast(cast(null as cstring) as cstring)) as regconfig),
            cast((select type from citus.pg_dist_object limit 1 offset 1) as text)
        ) as c0,
        sample_0.org_id as c1,
        sample_0.id as c2,
        sample_0.score as c3,
        sample_0.country_id as c4,
        sample_0.org_id as c5,
        sample_0.org_id as c6
    from
        sqlsmith_failures.users as sample_0 tablesample system (7.5)
    where sample_0.org_id is not NULL) as subq_0
where (select pg_catalog.array_agg(id) from sqlsmith_failures.countries)
    is not NULL;

-- cleanup
DROP SCHEMA sqlsmith_failures CASCADE;
@@ -351,6 +351,43 @@ SELECT a.key, a, count(b)
FROM items a LEFT JOIN other_items b ON (a.key = b.key)
GROUP BY a.key ORDER BY 3, 2, 1;

-- Of the target list entries, v1-v3 should be wrapped in any_value as they do
-- not appear in the GROUP BY. The append happens on the coordinator in that case.
-- Vars in the HAVING clause that do not appear in the GROUP BY are also wrapped.
SELECT
    a.key as k1,
    a.key as k2,
    a.key || '_append' as k3,
    a.value as v1,
    a.value as v2,
    a.value || '_notgrouped' as v3,
    a.value || '_append' as va1,
    a.value || '_append' as va2,
    a.value || '_append' || '_more' as va3,
    count(*)
FROM items a LEFT JOIN other_items b ON (a.key = b.key)
GROUP BY a.key, a.value ||'_append'
HAVING length(a.key) + length(a.value) < length(a.value || '_append')
ORDER BY 1;

SELECT coordinator_plan($$
EXPLAIN (VERBOSE ON, COSTS OFF)
SELECT
    a.key as k1,
    a.key as k2,
    a.key || '_append' as k3,
    a.value as v1,
    a.value as v2,
    a.value || '_notgrouped' as v3,
    a.value || '_append' as va1,
    a.value || '_append' as va2,
    a.value || '_append' || '_more' as va3,
    count(*)
FROM items a LEFT JOIN other_items b ON (a.key = b.key)
GROUP BY a.key, a.value ||'_append'
HAVING length(a.key) + length(a.value) < length(a.value || '_append')
ORDER BY 1
$$);
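
-- A minimal sketch of the same wrapping with a single ungrouped column; this
-- assumes (as in the queries above) that items.key is the primary key, so the
-- ungrouped a.value is valid and gets wrapped in any_value in the worker query.
SELECT coordinator_plan($$
EXPLAIN (VERBOSE ON, COSTS OFF)
SELECT a.key, a.value, count(*)
FROM items a
GROUP BY a.key
ORDER BY 1
$$);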
SELECT a FROM items a ORDER BY key;
SELECT a FROM items a WHERE key = 'key-1';
SELECT a FROM (SELECT a, random() FROM items a) b ORDER BY a;
@@ -0,0 +1,195 @@
--
-- TDIGEST_AGGREGATE_SUPPORT
-- test the integration of the github.com/tvondra/tdigest aggregates into the
-- citus planner, which pushes down parts of the aggregate to get parallelized
-- execution and reduced data transfer sizes for aggregates not grouped by the
-- distribution column
--

SET citus.next_shard_id TO 20070000;
CREATE SCHEMA tdigest_aggregate_support;
SET search_path TO tdigest_aggregate_support, public;

-- create the tdigest extension when installed
SELECT CASE WHEN COUNT(*) > 0
    THEN 'CREATE EXTENSION tdigest WITH SCHEMA public'
    ELSE 'SELECT false AS tdigest_present' END
AS create_cmd FROM pg_available_extensions()
WHERE name = 'tdigest'
\gset
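-- :create_cmd now holds whichever command the query above produced, so the
-- next line creates the extension only when tdigest is actually installed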
:create_cmd;

SET citus.shard_count TO 4;
SET citus.coordinator_aggregation_strategy TO 'disabled'; -- prevent aggregate execution when the aggregate can't be pushed down

CREATE TABLE latencies (a int, b int, latency double precision);
SELECT create_distributed_table('latencies', 'a');
SELECT setseed(0.42); -- make the random data inserted deterministic
INSERT INTO latencies
SELECT (random()*20)::int AS a,
       (random()*20)::int AS b,
       random()*10000.0 AS latency
FROM generate_series(1, 10000);

-- explain no grouping to verify partially pushed down for tdigest(value, compression)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest(latency, 100)
FROM latencies;

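-- the partial pushdown above splits the aggregate roughly as follows (a
-- sketch of the idea, not the exact plan; the shard name is illustrative):
--   on each worker:     SELECT tdigest(latency, 100) FROM latencies_20070000;
--   on the coordinator: combine the per-shard digests with tdigest(tdigest)
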
-- explain grouping by distribution column is completely pushed down for tdigest(value, compression)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest(latency, 100)
FROM latencies
GROUP BY a;

-- explain grouping by non-distribution column is partially pushed down for tdigest(value, compression)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest(latency, 100)
FROM latencies
GROUP BY b;

-- explain no grouping to verify partially pushed down for tdigest_percentile(value, compression, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(latency, 100, 0.99)
FROM latencies;

-- explain grouping by distribution column is completely pushed down for tdigest_percentile(value, compression, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(latency, 100, 0.99)
FROM latencies
GROUP BY a;

-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile(value, compression, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile(latency, 100, 0.99)
FROM latencies
GROUP BY b;

-- explain no grouping to verify partially pushed down for tdigest_percentile(value, compression, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(latency, 100, ARRAY[0.99, 0.95])
FROM latencies;

-- explain grouping by distribution column is completely pushed down for tdigest_percentile(value, compression, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(latency, 100, ARRAY[0.99, 0.95])
FROM latencies
GROUP BY a;

-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile(value, compression, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile(latency, 100, ARRAY[0.99, 0.95])
FROM latencies
GROUP BY b;

-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(latency, 100, 9000)
FROM latencies;

-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(latency, 100, 9000)
FROM latencies
GROUP BY a;

-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile_of(latency, 100, 9000)
FROM latencies
GROUP BY b;

-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(latency, 100, ARRAY[9000, 9500])
FROM latencies;

-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(latency, 100, ARRAY[9000, 9500])
FROM latencies
GROUP BY a;

-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT b, tdigest_percentile_of(latency, 100, ARRAY[9000, 9500])
FROM latencies
GROUP BY b;

-- verify results - these should be stable thanks to the seed used while inserting
-- the data; if they fail due to data changes, the queries below can be removed or
-- changed to check for certain ranges
SELECT tdigest(latency, 100) FROM latencies;
SELECT tdigest_percentile(latency, 100, 0.99) FROM latencies;
SELECT tdigest_percentile(latency, 100, ARRAY[0.99, 0.95]) FROM latencies;
SELECT tdigest_percentile_of(latency, 100, 9000) FROM latencies;
SELECT tdigest_percentile_of(latency, 100, ARRAY[9000, 9500]) FROM latencies;

CREATE TABLE latencies_rollup (a int, tdigest tdigest);
SELECT create_distributed_table('latencies_rollup', 'a', colocate_with => 'latencies');

INSERT INTO latencies_rollup
SELECT a, tdigest(latency, 100)
FROM latencies
GROUP BY a;

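-- a rolled-up digest can answer the same percentile questions as the raw data;
-- as a usage sketch (the median below is approximate by construction):
SELECT tdigest_percentile(tdigest, 0.5) FROM latencies_rollup;
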
-- explain no grouping to verify partially pushed down for tdigest(tdigest)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest(tdigest)
FROM latencies_rollup;

-- explain grouping by distribution column is completely pushed down for tdigest(tdigest)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest(tdigest)
FROM latencies_rollup
GROUP BY a;

-- explain no grouping to verify partially pushed down for tdigest_percentile(tdigest, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(tdigest, 0.99)
FROM latencies_rollup;

-- explain grouping by distribution column is completely pushed down for tdigest_percentile(tdigest, quantile)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(tdigest, 0.99)
FROM latencies_rollup
GROUP BY a;

-- explain no grouping to verify partially pushed down for tdigest_percentile(tdigest, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile(tdigest, ARRAY[0.99, 0.95])
FROM latencies_rollup;

-- explain grouping by distribution column is completely pushed down for tdigest_percentile(tdigest, quantiles[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile(tdigest, ARRAY[0.99, 0.95])
FROM latencies_rollup
GROUP BY a;

-- explain no grouping to verify partially pushed down for tdigest_percentile_of(tdigest, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(tdigest, 9000)
FROM latencies_rollup;

-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(tdigest, hypothetical_value)
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(tdigest, 9000)
FROM latencies_rollup
GROUP BY a;

-- explain no grouping to verify partially pushed down for tdigest_percentile_of(tdigest, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT tdigest_percentile_of(tdigest, ARRAY[9000, 9500])
FROM latencies_rollup;

-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(tdigest, hypothetical_values[])
EXPLAIN (COSTS OFF, VERBOSE)
SELECT a, tdigest_percentile_of(tdigest, ARRAY[9000, 9500])
FROM latencies_rollup
GROUP BY a;

-- verify results - these should be stable thanks to the seed used while inserting
-- the data; if they fail due to data changes, the queries below can be removed or
-- changed to check for certain ranges
SELECT tdigest(tdigest) FROM latencies_rollup;
SELECT tdigest_percentile(tdigest, 0.99) FROM latencies_rollup;
SELECT tdigest_percentile(tdigest, ARRAY[0.99, 0.95]) FROM latencies_rollup;
SELECT tdigest_percentile_of(tdigest, 9000) FROM latencies_rollup;
SELECT tdigest_percentile_of(tdigest, ARRAY[9000, 9500]) FROM latencies_rollup;

SET client_min_messages TO WARNING; -- suppress cascade messages
DROP SCHEMA tdigest_aggregate_support CASCADE;