mirror of https://github.com/citusdata/citus.git
Drop postgres 11 support
parent 7081690480
commit 03832f353c
@@ -199,7 +199,7 @@ jobs:
       make -C src/test/regress \
         check-citus-upgrade \
         bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
-        citus-pre-tar=/install-pg11-citus${citus_version}.tar \
+        citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
         citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \
       done;

@@ -210,7 +210,7 @@ jobs:
       make -C src/test/regress \
         check-citus-upgrade-mixed \
         bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \
-        citus-pre-tar=/install-pg11-citus${citus_version}.tar \
+        citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \
         citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \
       done;
     no_output_timeout: 2m

@@ -426,10 +426,6 @@ workflows:
             ignore:
               - /release-[0-9]+\.[0-9]+.*/ # match with releaseX.Y.*

-      - build:
-          name: build-11
-          pg_major: 11
-          image_tag: '11.9'
       - build:
           name: build-12
           pg_major: 12

@@ -442,56 +438,6 @@ workflows:
       - check-style
      - check-sql-snapshots

-      - test-citus:
-          name: 'test-11_check-multi'
-          pg_major: 11
-          image_tag: '11.9'
-          make: check-multi
-          requires: [build-11]
-      - test-citus:
-          name: 'test-11_check-mx'
-          pg_major: 11
-          image_tag: '11.9'
-          make: check-multi-mx
-          requires: [build-11]
-      - test-citus:
-          name: 'test-11_check-vanilla'
-          pg_major: 11
-          image_tag: '11.9'
-          make: check-vanilla
-          requires: [build-11]
-      - test-citus:
-          name: 'test-11_check-isolation'
-          pg_major: 11
-          image_tag: '11.9'
-          make: check-isolation
-          requires: [build-11]
-      - test-citus:
-          name: 'test-11_check-worker'
-          pg_major: 11
-          image_tag: '11.9'
-          make: check-worker
-          requires: [build-11]
-      - test-citus:
-          name: 'test-11_check-operations'
-          pg_major: 11
-          image_tag: '11.9'
-          make: check-operations
-          requires: [build-11]
-      - test-citus:
-          name: 'test-11_check-follower-cluster'
-          pg_major: 11
-          image_tag: '11.9'
-          make: check-follower-cluster
-          requires: [build-11]
-      - test-citus:
-          name: 'test-11_check-failure'
-          pg_major: 11
-          image: citus/failtester
-          image_tag: '11.9'
-          make: check-failure
-          requires: [build-11]
-
       - test-citus:
           name: 'test-12_check-multi'
           pg_major: 12

@@ -628,13 +574,6 @@ workflows:
           make: check-failure
           requires: [build-13]

-      - test-pg-upgrade:
-          name: 'test-11-12_check-pg-upgrade'
-          old_pg_major: 11
-          new_pg_major: 12
-          image_tag: 11-12-13
-          requires: [build-11,build-12]
-
       - test-pg-upgrade:
           name: 'test-12-13_check-pg-upgrade'
           old_pg_major: 12

@@ -643,10 +582,10 @@ workflows:
           requires: [build-12,build-13]

       - test-citus-upgrade:
-          name: test-11_check-citus-upgrade
-          pg_major: 11
-          image_tag: '11.9'
-          requires: [build-11]
+          name: test-12_check-citus-upgrade
+          pg_major: 12
+          image_tag: '12.4'
+          requires: [build-12]

       - ch_benchmark:
           requires: [build-13]
@@ -2556,7 +2556,7 @@ if test -z "$version_num"; then
   as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5
 fi

-if test "$version_num" != '11' -a "$version_num" != '12' -a "$version_num" != '13'; then
+if test "$version_num" != '12' -a "$version_num" != '13'; then
   as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
 else
   { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5

@@ -4533,16 +4533,9 @@ cat >>confdefs.h <<_ACEOF
 _ACEOF


-if test "$version_num" != '11'; then
   HAS_TABLEAM=yes

 $as_echo "#define HAS_TABLEAM 1" >>confdefs.h

-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: postgres version does not support table access methods" >&5
-$as_echo "$as_me: postgres version does not support table access methods" >&6;}
-fi;
-
 # Require lz4 & zstd only if we are compiling columnar
 if test "$HAS_TABLEAM" == 'yes'; then
   #
@@ -74,7 +74,7 @@ if test -z "$version_num"; then
   AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.])
 fi

-if test "$version_num" != '11' -a "$version_num" != '12' -a "$version_num" != '13'; then
+if test "$version_num" != '12' -a "$version_num" != '13'; then
   AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
 else
   AC_MSG_NOTICE([building against PostgreSQL $version_num])

@@ -216,12 +216,8 @@ PGAC_ARG_REQ(with, reports-hostname, [HOSTNAME],
 AC_DEFINE_UNQUOTED(REPORTS_BASE_URL, "$REPORTS_BASE_URL",
                    [Base URL for statistics collection and update checks])

-if test "$version_num" != '11'; then
   HAS_TABLEAM=yes
   AC_DEFINE([HAS_TABLEAM], 1, [Define to 1 to build with table access method support, pg12 and up])
-else
-  AC_MSG_NOTICE([postgres version does not support table access methods])
-fi;

 # Require lz4 & zstd only if we are compiling columnar
 if test "$HAS_TABLEAM" == 'yes'; then
@@ -226,10 +226,8 @@ DecompressBuffer(StringInfo buffer,

 		case COMPRESSION_PG_LZ:
 		{
-			StringInfo decompressedBuffer = NULL;
 			uint32 compressedDataSize = VARSIZE(buffer->data) - COLUMNAR_COMPRESS_HDRSZ;
 			uint32 decompressedDataSize = COLUMNAR_COMPRESS_RAWSIZE(buffer->data);
-			int32 decompressedByteCount = 0;

 			if (compressedDataSize + COLUMNAR_COMPRESS_HDRSZ != buffer->len)
 			{

@@ -240,17 +238,11 @@ DecompressBuffer(StringInfo buffer,

 			char *decompressedData = palloc0(decompressedDataSize);

-#if PG_VERSION_NUM >= 120000
-			decompressedByteCount = pglz_decompress(COLUMNAR_COMPRESS_RAWDATA(
-														buffer->data),
-													compressedDataSize, decompressedData,
-													decompressedDataSize, true);
-#else
-			decompressedByteCount = pglz_decompress(COLUMNAR_COMPRESS_RAWDATA(
-														buffer->data),
-													compressedDataSize, decompressedData,
-													decompressedDataSize);
-#endif
+			int32 decompressedByteCount = pglz_decompress(COLUMNAR_COMPRESS_RAWDATA(
+															  buffer->data),
+														  compressedDataSize,
+														  decompressedData,
+														  decompressedDataSize, true);

 			if (decompressedByteCount < 0)
 			{

@@ -258,7 +250,7 @@ DecompressBuffer(StringInfo buffer,
 								errdetail("compressed data is corrupted")));
 			}

-			decompressedBuffer = palloc0(sizeof(StringInfoData));
+			StringInfo decompressedBuffer = palloc0(sizeof(StringInfoData));
 			decompressedBuffer->data = decompressedData;
 			decompressedBuffer->len = decompressedDataSize;
 			decompressedBuffer->maxlen = decompressedDataSize;
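The DecompressBuffer hunks above collapse an #ifdef pair because PostgreSQL 12 added a check_complete argument to pglz_decompress(); with PG11 support gone the call can be made unconditionally. A minimal sketch of the post-PG12 call shape, assuming the server headers are available; decompress_chunk is an illustrative helper, not part of this patch:

    #include "postgres.h"
    #include "common/pg_lzcompress.h"

    static int32
    decompress_chunk(const char *compressed, int32 compressedSize,
                     char *out, int32 rawSize)
    {
        /* the final bool (check_complete) exists only on PG12 and later */
        return pglz_decompress(compressed, compressedSize, out, rawSize, true);
    }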
@@ -39,14 +39,9 @@ PG_FUNCTION_INFO_V1(column_store_memory_stats);
 Datum
 column_store_memory_stats(PG_FUNCTION_ARGS)
 {
-	TupleDesc tupleDescriptor = NULL;
 	const int resultColumnCount = 3;

-#if PG_VERSION_NUM >= PG_VERSION_12
-	tupleDescriptor = CreateTemplateTupleDesc(resultColumnCount);
-#else
-	tupleDescriptor = CreateTemplateTupleDesc(resultColumnCount, false);
-#endif
+	TupleDesc tupleDescriptor = CreateTemplateTupleDesc(resultColumnCount);

 	TupleDescInitEntry(tupleDescriptor, (AttrNumber) 1, "TopMemoryContext",
 					   INT8OID, -1, 0);
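The CreateTemplateTupleDesc() change above recurs throughout this commit: PG12 removed per-row OIDs, so the constructor lost its trailing hasoid argument. A minimal sketch against PG12+ headers; the helper name and the second and third column names are illustrative assumptions (the hunk above only shows the first column):

    #include "postgres.h"
    #include "access/tupdesc.h"
    #include "catalog/pg_type.h"

    static TupleDesc
    build_memory_stats_tupledesc(void)
    {
        /* PG12 dropped the trailing hasoid argument */
        TupleDesc tupleDescriptor = CreateTemplateTupleDesc(3);

        TupleDescInitEntry(tupleDescriptor, (AttrNumber) 1, "TopMemoryContext",
                           INT8OID, -1, 0);
        TupleDescInitEntry(tupleDescriptor, (AttrNumber) 2, "context_two",
                           INT8OID, -1, 0);
        TupleDescInitEntry(tupleDescriptor, (AttrNumber) 3, "context_three",
                           INT8OID, -1, 0);
        return tupleDescriptor;
    }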
@@ -1067,15 +1067,10 @@ InsertTupleAndEnforceConstraints(ModifyState *state, Datum *values, bool *nulls)
 	TupleDesc tupleDescriptor = RelationGetDescr(state->rel);
 	HeapTuple tuple = heap_form_tuple(tupleDescriptor, values, nulls);

-#if PG_VERSION_NUM >= 120000
 	TupleTableSlot *slot = ExecInitExtraTupleSlot(state->estate, tupleDescriptor,
 												  &TTSOpsHeapTuple);

 	ExecStoreHeapTuple(tuple, slot, false);
-#else
-	TupleTableSlot *slot = ExecInitExtraTupleSlot(state->estate, tupleDescriptor);
-	ExecStoreTuple(tuple, slot, InvalidBuffer, false);
-#endif

 	/* use ExecSimpleRelationInsert to enforce constraints */
 	ExecSimpleRelationInsert(state->estate, slot);

@@ -1127,20 +1122,16 @@ FinishModifyRelation(ModifyState *state)
 static EState *
 create_estate_for_relation(Relation rel)
 {
-	ResultRelInfo *resultRelInfo;
-
 	EState *estate = CreateExecutorState();

 	RangeTblEntry *rte = makeNode(RangeTblEntry);
 	rte->rtekind = RTE_RELATION;
 	rte->relid = RelationGetRelid(rel);
 	rte->relkind = rel->rd_rel->relkind;
-#if PG_VERSION_NUM >= 120000
 	rte->rellockmode = AccessShareLock;
 	ExecInitRangeTable(estate, list_make1(rte));
-#endif

-	resultRelInfo = makeNode(ResultRelInfo);
+	ResultRelInfo *resultRelInfo = makeNode(ResultRelInfo);
 	InitResultRelInfo(resultRelInfo, rel, 1, NULL, 0);

 	estate->es_result_relations = resultRelInfo;

@@ -1149,15 +1140,6 @@ create_estate_for_relation(Relation rel)

 	estate->es_output_cid = GetCurrentCommandId(true);

-#if PG_VERSION_NUM < 120000
-
-	/* Triggers might need a slot */
-	if (resultRelInfo->ri_TrigDesc)
-	{
-		estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, NULL);
-	}
-#endif
-
 	/* Prepare to catch AFTER triggers. */
 	AfterTriggerBeginQuery();
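The InsertTupleAndEnforceConstraints and create_estate_for_relation hunks above reflect PG12's table-slot abstraction: ExecInitExtraTupleSlot() now takes a TupleTableSlotOps argument and ExecStoreHeapTuple() replaces ExecStoreTuple(). A minimal sketch, assuming PG12+ executor headers; store_tuple_in_slot is an illustrative helper:

    #include "postgres.h"
    #include "executor/executor.h"
    #include "executor/tuptable.h"

    /* store a heap tuple into an executor-owned slot, PG12-style */
    static TupleTableSlot *
    store_tuple_in_slot(EState *estate, TupleDesc tupleDescriptor, HeapTuple tuple)
    {
        TupleTableSlot *slot = ExecInitExtraTupleSlot(estate, tupleDescriptor,
                                                      &TTSOpsHeapTuple);

        /* shouldFree = false: the caller keeps ownership of the tuple */
        ExecStoreHeapTuple(tuple, slot, false);
        return slot;
    }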
@@ -23,13 +23,10 @@
 #include "commands/defrem.h"
 #include "distributed/listutils.h"
 #include "nodes/makefuncs.h"
-#if PG_VERSION_NUM >= 120000
 #include "nodes/nodeFuncs.h"
 #include "optimizer/optimizer.h"
-#else
-#include "optimizer/clauses.h"
-#include "optimizer/predtest.h"
-#endif
 #include "optimizer/restrictinfo.h"
 #include "storage/fd.h"
 #include "utils/guc.h"
 #include "utils/memutils.h"
@@ -435,11 +435,6 @@ AlterDistributedTable(TableConversionParameters *params)
 TableConversionReturn *
 AlterTableSetAccessMethod(TableConversionParameters *params)
 {
-#if PG_VERSION_NUM < PG_VERSION_12
-	ereport(ERROR, (errmsg("table access methods are not supported "
-						   "for Postgres versions earlier than 12")));
-#endif
-
 	EnsureRelationExists(params->relationId);
 	EnsureTableOwner(params->relationId);

@@ -963,7 +958,6 @@ CreateTableConversion(TableConversionParameters *params)
 		BuildDistributionKeyFromColumnName(relation, con->distributionColumn);

 	con->originalAccessMethod = NULL;
-#if PG_VERSION_NUM >= PG_VERSION_12
 	if (!PartitionedTable(con->relationId))
 	{
 		HeapTuple amTuple = SearchSysCache1(AMOID, ObjectIdGetDatum(

@@ -977,7 +971,6 @@ CreateTableConversion(TableConversionParameters *params)
 		con->originalAccessMethod = NameStr(amForm->amname);
 		ReleaseSysCache(amTuple);
 	}
-#endif

 	con->colocatedTableList = NIL;

@@ -1296,12 +1289,10 @@ GetNonGeneratedStoredColumnNameList(Oid relationId)
 			continue;
 		}

-#if PG_VERSION_NUM >= 120000
 		if (currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED)
 		{
 			continue;
 		}
-#endif

 		const char *quotedColumnName = quote_identifier(NameStr(currentColumn->attname));
 		nonStoredColumnNameList = lappend(nonStoredColumnNameList,
@@ -14,9 +14,6 @@

 #include "distributed/pg_version_constants.h"

-#if (PG_VERSION_NUM < PG_VERSION_12)
-#include "access/htup_details.h"
-#endif
 #include "access/xact.h"
 #include "catalog/pg_constraint.h"
 #include "distributed/commands/utility_hook.h"
@@ -50,35 +50,21 @@ static void EnsureSequentialModeForCollationDDL(void);
 static char *
 CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollationName)
 {
-	char *schemaName = NULL;
 	StringInfoData collationNameDef;
-	const char *providerString = NULL;
-	HeapTuple heapTuple = NULL;
-	Form_pg_collation collationForm = NULL;
-	char collprovider;
-	const char *collcollate;
-	const char *collctype;
-	const char *collname;
-	Oid collnamespace;
-#if PG_VERSION_NUM >= PG_VERSION_12
-	bool collisdeterministic;
-#endif

-	heapTuple = SearchSysCache1(COLLOID, ObjectIdGetDatum(collationId));
+	HeapTuple heapTuple = SearchSysCache1(COLLOID, ObjectIdGetDatum(collationId));
 	if (!HeapTupleIsValid(heapTuple))
 	{
 		elog(ERROR, "citus cache lookup failed for collation %u", collationId);
 	}

-	collationForm = (Form_pg_collation) GETSTRUCT(heapTuple);
-	collprovider = collationForm->collprovider;
-	collcollate = NameStr(collationForm->collcollate);
-	collctype = NameStr(collationForm->collctype);
-	collnamespace = collationForm->collnamespace;
-	collname = NameStr(collationForm->collname);
-#if PG_VERSION_NUM >= PG_VERSION_12
-	collisdeterministic = collationForm->collisdeterministic;
-#endif
+	Form_pg_collation collationForm = (Form_pg_collation) GETSTRUCT(heapTuple);
+	char collprovider = collationForm->collprovider;
+	const char *collcollate = NameStr(collationForm->collcollate);
+	const char *collctype = NameStr(collationForm->collctype);
+	Oid collnamespace = collationForm->collnamespace;
+	const char *collname = NameStr(collationForm->collname);
+	bool collisdeterministic = collationForm->collisdeterministic;

 	if (collowner != NULL)
 	{

@@ -86,9 +72,9 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
 	}

 	ReleaseSysCache(heapTuple);
-	schemaName = get_namespace_name(collnamespace);
+	char *schemaName = get_namespace_name(collnamespace);
 	*quotedCollationName = quote_qualified_identifier(schemaName, collname);
-	providerString =
+	const char *providerString =
 		collprovider == COLLPROVIDER_DEFAULT ? "default" :
 		collprovider == COLLPROVIDER_ICU ? "icu" :
 		collprovider == COLLPROVIDER_LIBC ? "libc" : NULL;

@@ -117,12 +103,10 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
 							 quote_literal_cstr(collctype));
 	}

-#if PG_VERSION_NUM >= PG_VERSION_12
 	if (!collisdeterministic)
 	{
 		appendStringInfoString(&collationNameDef, ", deterministic = false");
 	}
-#endif

 	appendStringInfoChar(&collationNameDef, ')');
@@ -29,9 +29,7 @@
 #include "catalog/pg_extension.h"
 #include "catalog/pg_namespace.h"
 #include "catalog/pg_opclass.h"
-#if PG_VERSION_NUM >= 12000
 #include "catalog/pg_proc.h"
-#endif
 #include "catalog/pg_trigger.h"
 #include "commands/defrem.h"
 #include "commands/extension.h"

@@ -819,18 +817,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,

 	ErrorIfTableIsACatalogTable(relation);

-#if PG_VERSION_NUM < PG_VERSION_12
-
-	/* verify target relation does not use WITH (OIDS) PostgreSQL feature */
-	if (relationDesc->tdhasoid)
-	{
-		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						errmsg("cannot distribute relation: %s", relationName),
-						errdetail("Distributed relations must not specify the WITH "
-								  "(OIDS) option in their definitions.")));
-	}
-#endif
-
 	/* verify target relation does not use identity columns */
 	if (RelationUsesIdentityColumns(relationDesc))
 	{

@@ -866,7 +852,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
 							  "defined to use hash partitioning.")));
 		}

-#if PG_VERSION_NUM >= PG_VERSION_12
 		if (distributionColumn->varcollid != InvalidOid &&
 			!get_collation_isdeterministic(distributionColumn->varcollid))
 		{

@@ -874,7 +859,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
 							errmsg("Hash distributed partition columns may not use "
 								   "a non deterministic collation")));
 		}
-#endif
 	}
 	else if (distributionMethod == DISTRIBUTE_BY_RANGE)
 	{

@@ -1539,12 +1523,8 @@ DoCopyFromLocalTableIntoShards(Relation distributedRelation,
 							   EState *estate)
 {
 	/* begin reading from local table */
-#if PG_VERSION_NUM >= PG_VERSION_12
 	TableScanDesc scan = table_beginscan(distributedRelation, GetActiveSnapshot(), 0,
 										 NULL);
-#else
-	HeapScanDesc scan = heap_beginscan(distributedRelation, GetActiveSnapshot(), 0, NULL);
-#endif

 	MemoryContext oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));

@@ -1593,11 +1573,7 @@ DoCopyFromLocalTableIntoShards(Relation distributedRelation,
 	MemoryContextSwitchTo(oldContext);

 	/* finish reading from the local table */
-#if PG_VERSION_NUM >= PG_VERSION_12
 	table_endscan(scan);
-#else
-	heap_endscan(scan);
-#endif
 }

@@ -1615,10 +1591,8 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
 		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
 		char *columnName = NameStr(currentColumn->attname);

-		if (currentColumn->attisdropped
-#if PG_VERSION_NUM >= PG_VERSION_12
-			|| currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
-#endif
+		if (currentColumn->attisdropped ||
+			currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
 			)
 		{
 			continue;

@@ -1660,7 +1634,6 @@ static bool
 DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
 											Var *distributionColumn)
 {
-#if PG_VERSION_NUM >= PG_VERSION_12
 	Form_pg_attribute attributeForm = TupleDescAttr(relationDesc,
 													distributionColumn->varattno - 1);

@@ -1668,7 +1641,6 @@ DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
 	{
 		return true;
 	}
-#endif

 	return false;
 }
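Many hunks in this file (and in the COPY code later in the commit) collapse the same two-line test: PG12 introduced stored generated columns, exposed as the attgenerated byte on pg_attribute, so with PG11 gone the check no longer needs an #ifdef. A minimal sketch of the skip pattern, assuming PG12+ headers; usable_column_count is an illustrative helper:

    #include "postgres.h"
    #include "access/tupdesc.h"
    #include "catalog/pg_attribute.h"

    /* count the columns a COPY or INSERT should actually supply */
    static int
    usable_column_count(TupleDesc tupleDescriptor)
    {
        int count = 0;

        for (int i = 0; i < tupleDescriptor->natts; i++)
        {
            Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, i);

            /* attgenerated is a plain char column on PG12+, no #ifdef needed */
            if (attr->attisdropped ||
                attr->attgenerated == ATTRIBUTE_GENERATED_STORED)
            {
                continue;
            }
            count++;
        }
        return count;
    }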
@@ -62,7 +62,7 @@ static DistributeObjectOps Any_AlterEnum = {
 	.deparse = DeparseAlterEnumStmt,
 	.qualify = QualifyAlterEnumStmt,
 	.preprocess = PreprocessAlterEnumStmt,
-	.postprocess = PostprocessAlterEnumStmt,
+	.postprocess = NULL,
 	.address = AlterEnumStmtObjectAddress,
 };
 static DistributeObjectOps Any_AlterExtension = {
@@ -19,9 +19,7 @@
 #include "access/xact.h"
 #include "catalog/namespace.h"
 #include "catalog/pg_constraint.h"
-#if (PG_VERSION_NUM >= PG_VERSION_12)
 #include "access/genam.h"
-#endif
 #include "catalog/pg_type.h"
 #include "distributed/colocation_utils.h"
 #include "distributed/commands.h"

@@ -715,14 +713,9 @@ get_relation_constraint_oid_compat(HeapTuple heapTuple)
 {
 	Assert(heapTuple != NULL);

-	Oid constraintOid = InvalidOid;
-
-#if PG_VERSION_NUM >= PG_VERSION_12
 	Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
-	constraintOid = constraintForm->oid;
-#else
-	constraintOid = HeapTupleGetOid(heapTuple);
-#endif
+	Oid constraintOid = constraintForm->oid;

 	return constraintOid;
 }

@@ -1284,8 +1277,6 @@ GetForeignConstraintCommandsToReferenceTable(ShardInterval *shardInterval)
 static void
 UpdateConstraintIsValid(Oid constraintId, bool isValid)
 {
-	HeapTuple heapTuple = NULL;
-	SysScanDesc scanDescriptor;
 	ScanKeyData scankey[1];
 	Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock);
 	TupleDesc tupleDescriptor = RelationGetDescr(pgConstraint);

@@ -1294,21 +1285,17 @@ UpdateConstraintIsValid(Oid constraintId, bool isValid)
 	bool replace[Natts_pg_constraint];

 	ScanKeyInit(&scankey[0],
-#if PG_VERSION_NUM >= 120000
 				Anum_pg_constraint_oid,
-#else
-				ObjectIdAttributeNumber,
-#endif
 				BTEqualStrategyNumber, F_OIDEQ,
 				ObjectIdGetDatum(constraintId));

-	scanDescriptor = systable_beginscan(pgConstraint,
+	SysScanDesc scanDescriptor = systable_beginscan(pgConstraint,
 													ConstraintOidIndexId,
 													true,
 													NULL,
 													1,
 													scankey);
-	heapTuple = systable_getnext(scanDescriptor);
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	if (!HeapTupleIsValid(heapTuple))
 	{
 		elog(ERROR, "could not find tuple for constraint %u", constraintId);
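The *_oid_compat hunks exist because PG12 turned catalog row OIDs into an ordinary column: Form_pg_constraint (and pg_trigger, pg_extension, pg_rewrite, and friends) gained an oid field, and HeapTupleGetOid() disappeared. A minimal sketch of the PG12+ form, with an illustrative helper name:

    #include "postgres.h"
    #include "access/htup_details.h"
    #include "catalog/pg_constraint.h"

    /* on PG12+ the row OID is an ordinary first column of the Form struct */
    static Oid
    constraint_tuple_oid(HeapTuple heapTuple)
    {
        Form_pg_constraint constraintForm =
            (Form_pg_constraint) GETSTRUCT(heapTuple);

        return constraintForm->oid;    /* pre-PG12: HeapTupleGetOid(heapTuple) */
    }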
@@ -21,9 +21,7 @@

 #include "distributed/pg_version_constants.h"

-#if PG_VERSION_NUM >= PG_VERSION_12
 #include "access/genam.h"
-#endif
 #include "access/htup_details.h"
 #include "access/xact.h"
 #include "catalog/pg_aggregate.h"

@@ -736,9 +734,6 @@ static char *
 GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
 {
 	StringInfoData buf = { 0 };
-	HeapTuple aggtup = NULL;
-	Form_pg_aggregate agg = NULL;
-	int numargs = 0;
 	int i = 0;
 	Oid *argtypes = NULL;
 	char **argnames = NULL;

@@ -762,7 +757,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
 	const char *name = NameStr(proc->proname);
 	const char *nsp = get_namespace_name(proc->pronamespace);

-#if PG_VERSION_NUM >= PG_VERSION_12
 	if (useCreateOrReplace)
 	{
 		appendStringInfo(&buf, "CREATE OR REPLACE AGGREGATE %s(",

@@ -773,20 +767,16 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
 		appendStringInfo(&buf, "CREATE AGGREGATE %s(",
 						 quote_qualified_identifier(nsp, name));
 	}
-#else
-	appendStringInfo(&buf, "CREATE AGGREGATE %s(",
-					 quote_qualified_identifier(nsp, name));
-#endif

 	/* Parameters, borrows heavily from print_function_arguments in postgres */
-	numargs = get_func_arg_info(proctup, &argtypes, &argnames, &argmodes);
+	int numargs = get_func_arg_info(proctup, &argtypes, &argnames, &argmodes);

-	aggtup = SearchSysCache1(AGGFNOID, funcOid);
+	HeapTuple aggtup = SearchSysCache1(AGGFNOID, funcOid);
 	if (!HeapTupleIsValid(aggtup))
 	{
 		elog(ERROR, "cache lookup failed for %d", funcOid);
 	}
-	agg = (Form_pg_aggregate) GETSTRUCT(aggtup);
+	Form_pg_aggregate agg = (Form_pg_aggregate) GETSTRUCT(aggtup);

 	if (AGGKIND_IS_ORDERED_SET(agg->aggkind))
 	{

@@ -1066,12 +1056,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
 	ReleaseSysCache(aggtup);
 	ReleaseSysCache(proctup);

-#if PG_VERSION_NUM < PG_VERSION_12
-	if (useCreateOrReplace)
-	{
-		return WrapCreateOrReplace(buf.data);
-	}
-#endif
 	return buf.data;
 }
@@ -11,9 +11,7 @@
 #include "postgres.h"

 #include "distributed/pg_version_constants.h"
-#if PG_VERSION_NUM >= PG_VERSION_12
 #include "access/genam.h"
-#endif
 #include "access/htup_details.h"
 #include "access/xact.h"
 #include "catalog/catalog.h"

@@ -93,9 +91,7 @@ struct DropRelationCallbackState
 */
 struct ReindexIndexCallbackState
 {
-#if PG_VERSION_NUM >= PG_VERSION_12
 	bool concurrent;
-#endif
 	Oid locked_table_oid;
 };

@@ -544,13 +540,8 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand,
 {
 	Relation relation = NULL;
 	Oid relationId = InvalidOid;
-	bool isCitusRelation = false;
-#if PG_VERSION_NUM >= PG_VERSION_12
 	LOCKMODE lockmode = reindexStatement->concurrent ? ShareUpdateExclusiveLock :
 						AccessExclusiveLock;
-#else
-	LOCKMODE lockmode = AccessExclusiveLock;
-#endif
 	MemoryContext relationContext = NULL;

 	Assert(reindexStatement->kind == REINDEX_OBJECT_INDEX ||

@@ -558,14 +549,11 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand,

 	if (reindexStatement->kind == REINDEX_OBJECT_INDEX)
 	{
-		Oid indOid;
 		struct ReindexIndexCallbackState state;
-#if PG_VERSION_NUM >= PG_VERSION_12
 		state.concurrent = reindexStatement->concurrent;
-#endif
 		state.locked_table_oid = InvalidOid;

-		indOid = RangeVarGetRelidExtended(reindexStatement->relation,
+		Oid indOid = RangeVarGetRelidExtended(reindexStatement->relation,
 											  lockmode, 0,
 											  RangeVarCallbackForReindexIndex,
 											  &state);

@@ -581,7 +569,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand,
 		relationId = RelationGetRelid(relation);
 	}

-	isCitusRelation = IsCitusTable(relationId);
+	bool isCitusRelation = IsCitusTable(relationId);

 	if (reindexStatement->relation->schemaname == NULL)
 	{

@@ -613,12 +601,8 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand,
 	{
 		DDLJob *ddlJob = palloc0(sizeof(DDLJob));
 		ddlJob->targetRelationId = relationId;
-#if PG_VERSION_NUM >= PG_VERSION_12
 		ddlJob->concurrentIndexCmd = reindexStatement->concurrent;
 		ddlJob->startNewTransaction = reindexStatement->concurrent;
-#else
-		ddlJob->concurrentIndexCmd = false;
-#endif
 		ddlJob->commandString = reindexCommand;
 		ddlJob->taskList = CreateReindexTaskList(relationId, reindexStatement);

@@ -1053,11 +1037,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, Oid relId, Oid oldRelI
 	 * non-concurrent case and table locks used by index_concurrently_*() for
 	 * concurrent case.
 	 */
-#if PG_VERSION_NUM >= PG_VERSION_12
 	table_lockmode = state->concurrent ? ShareUpdateExclusiveLock : ShareLock;
-#else
-	table_lockmode = ShareLock;
-#endif

 	/*
 	 * If we previously locked some other index's heap, and the name we're
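The reindex changes above hinge on REINDEX CONCURRENTLY, new in PG12, which is why ReindexStmt's concurrent flag can now be read unconditionally and the weaker lock chosen when it is set. A minimal sketch of the lock-mode choice, assuming the PG12/13 ReindexStmt layout; reindex_lockmode is an illustrative helper:

    #include "postgres.h"
    #include "nodes/parsenodes.h"
    #include "storage/lockdefs.h"

    /* REINDEX CONCURRENTLY (PG12+) only needs ShareUpdateExclusiveLock */
    static LOCKMODE
    reindex_lockmode(const ReindexStmt *stmt)
    {
        return stmt->concurrent ? ShareUpdateExclusiveLock : AccessExclusiveLock;
    }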
@@ -423,21 +423,13 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
 {
 	Oid tableId = RangeVarGetRelid(copyStatement->relation, NoLock, false);

-	CitusCopyDestReceiver *copyDest = NULL;
-	DestReceiver *dest = NULL;

-	Relation copiedDistributedRelation = NULL;
-	Form_pg_class copiedDistributedRelationTuple = NULL;
 	List *columnNameList = NIL;
 	int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX;

-	EState *executorState = NULL;
-	MemoryContext executorTupleContext = NULL;
-	ExprContext *executorExpressionContext = NULL;

 	bool stopOnFailure = false;

-	CopyState copyState = NULL;
 	uint64 processedRowCount = 0;

 	ErrorContextCallback errorCallback;

@@ -469,10 +461,8 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
 		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);
 		char *columnName = NameStr(currentColumn->attname);

-		if (currentColumn->attisdropped
-#if PG_VERSION_NUM >= PG_VERSION_12
-			|| currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
-#endif
+		if (currentColumn->attisdropped ||
+			currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
 			)
 		{
 			continue;

@@ -481,9 +471,9 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
 		columnNameList = lappend(columnNameList, columnName);
 	}

-	executorState = CreateExecutorState();
-	executorTupleContext = GetPerTupleMemoryContext(executorState);
-	executorExpressionContext = GetPerTupleExprContext(executorState);
+	EState *executorState = CreateExecutorState();
+	MemoryContext executorTupleContext = GetPerTupleMemoryContext(executorState);
+	ExprContext *executorExpressionContext = GetPerTupleExprContext(executorState);

 	if (IsCitusTableType(tableId, REFERENCE_TABLE))
 	{

@@ -491,9 +481,11 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
 	}

 	/* set up the destination for the COPY */
-	copyDest = CreateCitusCopyDestReceiver(tableId, columnNameList, partitionColumnIndex,
-										   executorState, stopOnFailure, NULL);
-	dest = (DestReceiver *) copyDest;
+	CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(tableId, columnNameList,
+																  partitionColumnIndex,
+																  executorState,
+																  stopOnFailure, NULL);
+	DestReceiver *dest = (DestReceiver *) copyDest;
 	dest->rStartup(dest, 0, tupleDescriptor);

 	/*

@@ -501,8 +493,9 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
 	 * of BeginCopyFrom. However, we obviously should not do this in relcache
 	 * and therefore make a copy of the Relation.
 	 */
-	copiedDistributedRelation = (Relation) palloc(sizeof(RelationData));
-	copiedDistributedRelationTuple = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
+	Relation copiedDistributedRelation = (Relation) palloc(sizeof(RelationData));
+	Form_pg_class copiedDistributedRelationTuple =
+		(Form_pg_class) palloc(CLASS_TUPLE_SIZE);

 	/*
 	 * There is no need to deep copy everything. We will just deep copy of the fields

@@ -527,7 +520,7 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
 	}

 	/* initialize copy state to read from COPY data source */
-	copyState = BeginCopyFrom(NULL,
+	CopyState copyState = BeginCopyFrom(NULL,
 							  copiedDistributedRelation,
 							  copyStatement->filename,
 							  copyStatement->is_program,

@@ -945,18 +938,15 @@ CanUseBinaryCopyFormat(TupleDesc tupleDescription)
 	for (int columnIndex = 0; columnIndex < totalColumnCount; columnIndex++)
 	{
 		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescription, columnIndex);
-		Oid typeId = InvalidOid;

-		if (currentColumn->attisdropped
-#if PG_VERSION_NUM >= PG_VERSION_12
-			|| currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
-#endif
+		if (currentColumn->attisdropped ||
+			currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
 			)
 		{
 			continue;
 		}

-		typeId = currentColumn->atttypid;
+		Oid typeId = currentColumn->atttypid;
 		if (!CanUseBinaryCopyFormatForType(typeId))
 		{
 			useBinaryCopyFormat = false;

@@ -1417,10 +1407,8 @@ TypeArrayFromTupleDescriptor(TupleDesc tupleDescriptor)
 	for (int columnIndex = 0; columnIndex < columnCount; columnIndex++)
 	{
 		Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, columnIndex);
-		if (attr->attisdropped
-#if PG_VERSION_NUM >= PG_VERSION_12
-			|| attr->attgenerated == ATTRIBUTE_GENERATED_STORED
-#endif
+		if (attr->attisdropped ||
+			attr->attgenerated == ATTRIBUTE_GENERATED_STORED
 			)
 		{
 			typeArray[columnIndex] = InvalidOid;

@@ -1589,10 +1577,8 @@ AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor,
 			value = CoerceColumnValue(value, &columnCoercionPaths[columnIndex]);
 		}

-		if (currentColumn->attisdropped
-#if PG_VERSION_NUM >= PG_VERSION_12
-			|| currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
-#endif
+		if (currentColumn->attisdropped ||
+			currentColumn->attgenerated == ATTRIBUTE_GENERATED_STORED
 			)
 		{
 			continue;

@@ -1712,10 +1698,8 @@ AvailableColumnCount(TupleDesc tupleDescriptor)
 	{
 		Form_pg_attribute currentColumn = TupleDescAttr(tupleDescriptor, columnIndex);

-		if (!currentColumn->attisdropped
-#if PG_VERSION_NUM >= PG_VERSION_12
-			&& currentColumn->attgenerated != ATTRIBUTE_GENERATED_STORED
-#endif
+		if (!currentColumn->attisdropped &&
+			currentColumn->attgenerated != ATTRIBUTE_GENERATED_STORED
 			)
 		{
 			columnCount++;

@@ -2993,13 +2977,11 @@ ProcessCopyStmt(CopyStmt *copyStatement, QueryCompletionCompat *completionTag, c
 	{
 		if (copyStatement->is_from)
 		{
-#if PG_VERSION_NUM >= PG_VERSION_12
 			if (copyStatement->whereClause)
 			{
 				ereport(ERROR, (errmsg(
 									"Citus does not support COPY FROM with WHERE")));
 			}
-#endif

 			/* check permissions, we're bypassing postgres' normal checks */
 			CheckCopyPermissions(copyStatement);

@@ -3054,10 +3036,8 @@ CitusCopySelect(CopyStmt *copyStatement)
 	{
 		Form_pg_attribute attr = &tupleDescriptor->attrs[i];

-		if (attr->attisdropped
-#if PG_VERSION_NUM >= PG_VERSION_12
-			|| attr->attgenerated
-#endif
+		if (attr->attisdropped ||
+			attr->attgenerated
 			)
 		{
 			continue;

@@ -3312,10 +3292,8 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist)
 		{
 			if (TupleDescAttr(tupDesc, i)->attisdropped)
 				continue;
-#if PG_VERSION_NUM >= PG_VERSION_12
 			if (TupleDescAttr(tupDesc, i)->attgenerated)
 				continue;
-#endif
 			attnums = lappend_int(attnums, i + 1);
 		}
 	}

@@ -3340,14 +3318,12 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist)
 				continue;
 			if (namestrcmp(&(att->attname), name) == 0)
 			{
-#if PG_VERSION_NUM >= PG_VERSION_12
 				if (att->attgenerated)
 					ereport(ERROR,
 							(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
 							 errmsg("column \"%s\" is a generated column",
 									name),
 							 errdetail("Generated columns cannot be used in COPY.")));
-#endif
 				attnum = att->attnum;
 				break;
 			}
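One COPY hunk above is about a PG12 feature rather than an API rename: CopyStmt gained a whereClause node for COPY FROM ... WHERE, which Citus rejects for distributed tables. A minimal sketch of that guard, with an illustrative helper name:

    #include "postgres.h"
    #include "nodes/parsenodes.h"

    /* CopyStmt grew a whereClause field in PG12; reject it for distributed COPY */
    static void
    error_if_copy_has_where(const CopyStmt *copyStatement)
    {
        if (copyStatement->is_from && copyStatement->whereClause != NULL)
        {
            ereport(ERROR, (errmsg("Citus does not support COPY FROM with WHERE")));
        }
    }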
@@ -14,9 +14,7 @@

 #include "access/heapam.h"
 #include "access/htup_details.h"
-#if PG_VERSION_NUM >= PG_VERSION_12
 #include "access/table.h"
-#endif
 #include "catalog/catalog.h"
 #include "catalog/pg_auth_members.h"
 #include "catalog/pg_authid.h"

@@ -543,11 +541,7 @@ GenerateAlterRoleSetCommandForRole(Oid roleid)
 	List *commands = NIL;


-#if PG_VERSION_NUM >= PG_VERSION_12
 	TableScanDesc scan = table_beginscan_catalog(DbRoleSetting, 0, NULL);
-#else
-	HeapScanDesc scan = heap_beginscan_catalog(DbRoleSetting, 0, NULL);
-#endif

 	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 	{
@@ -178,13 +178,11 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,
 			continue;
 		}

-#if PG_VERSION_NUM >= PG_VERSION_12
 		if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
 		{
 			/* skip columns with GENERATED AS ALWAYS expressions */
 			continue;
 		}
-#endif

 		char *columnName = NameStr(attributeForm->attname);
 		*columnNameList = lappend(*columnNameList, columnName);
@@ -594,14 +594,9 @@ GetExplicitStatisticsIdList(Oid relationId)
 	HeapTuple heapTuple = systable_getnext(scanDescriptor);
 	while (HeapTupleIsValid(heapTuple))
 	{
-		Oid statisticsId = InvalidOid;
-#if PG_VERSION_NUM >= PG_VERSION_12
 		FormData_pg_statistic_ext *statisticsForm =
 			(FormData_pg_statistic_ext *) GETSTRUCT(heapTuple);
-		statisticsId = statisticsForm->oid;
-#else
-		statisticsId = HeapTupleGetOid(heapTuple);
-#endif
+		Oid statisticsId = statisticsForm->oid;
 		statisticsIdList = lappend_oid(statisticsIdList, statisticsId);

 		heapTuple = systable_getnext(scanDescriptor);
@@ -10,9 +10,7 @@

 #include "postgres.h"
 #include "distributed/pg_version_constants.h"
-#if PG_VERSION_NUM >= PG_VERSION_12
 #include "access/genam.h"
-#endif
 #include "access/htup_details.h"
 #include "access/xact.h"
 #include "catalog/index.h"
@@ -12,13 +12,7 @@
 #include "distributed/pg_version_constants.h"

 #include "access/genam.h"
-#if PG_VERSION_NUM >= PG_VERSION_12
 #include "access/table.h"
-#else
-#include "access/heapam.h"
-#include "access/htup_details.h"
-#include "access/sysattr.h"
-#endif
 #include "catalog/indexing.h"
 #include "catalog/namespace.h"
 #include "catalog/pg_trigger.h"

@@ -101,11 +95,7 @@ GetTriggerTupleById(Oid triggerId, bool missingOk)
 	int scanKeyCount = 1;
 	ScanKeyData scanKey[1];

-#if PG_VERSION_NUM >= PG_VERSION_12
 	AttrNumber attrNumber = Anum_pg_trigger_oid;
-#else
-	AttrNumber attrNumber = ObjectIdAttributeNumber;
-#endif

 	ScanKeyInit(&scanKey[0], attrNumber, BTEqualStrategyNumber,
 				F_OIDEQ, ObjectIdGetDatum(triggerId));

@@ -198,14 +188,9 @@ get_relation_trigger_oid_compat(HeapTuple heapTuple)
 {
 	Assert(HeapTupleIsValid(heapTuple));

-	Oid triggerOid = InvalidOid;
-
-#if PG_VERSION_NUM >= PG_VERSION_12
 	Form_pg_trigger triggerForm = (Form_pg_trigger) GETSTRUCT(heapTuple);
-	triggerOid = triggerForm->oid;
-#else
-	triggerOid = HeapTupleGetOid(heapTuple);
-#endif
+	Oid triggerOid = triggerForm->oid;

 	return triggerOid;
 }
@@ -322,8 +322,6 @@ List *
 PreprocessAlterEnumStmt(Node *node, const char *queryString,
 						ProcessUtilityContext processUtilityContext)
 {
-	List *commands = NIL;
-
 	ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false);
 	if (!ShouldPropagateObject(&typeAddress))
 	{

@@ -352,18 +350,8 @@ PreprocessAlterEnumStmt(Node *node, const char *queryString,
 	 * creating a DDLTaksList we won't return anything here. During the processing phase
 	 * we directly connect to workers and execute the commands remotely.
 	 */
-#if PG_VERSION_NUM < PG_VERSION_12
-	if (AlterEnumIsAddValue(castNode(AlterEnumStmt, node)))
-	{
-		/*
-		 * a plan cannot be made as it will be committed via 2PC when ran through the
-		 * executor, instead we directly distributed during processing phase
-		 */
-		return NIL;
-	}
-#endif

-	commands = list_make3(DISABLE_DDL_PROPAGATION,
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
 						  (void *) alterEnumStmtSql,
 						  ENABLE_DDL_PROPAGATION);

@@ -371,79 +359,6 @@ PreprocessAlterEnumStmt(Node *node, const char *queryString,
 }

-/*
- * PostprocessAlterEnumStmt is called after the AlterEnumStmt has been applied locally.
- *
- * This function is used for ALTER ENUM ... ADD VALUE for postgres versions lower than 12
- * to distribute the call. Before pg12 these statements could not be called in a
- * transaction. If we would plan the distirbution of these statements the same as we do
- * with the other statements they would get executed in a transaction to perform 2PC, that
- * would error out.
- *
- * If it would error on some workers we provide a warning to the user that the statement
- * failed to distributed with some detail on what to call after the cluster has been
- * repaired.
- *
- * For pg12 the statements can be called in a transaction but will only become visible
- * when the transaction commits. This is behaviour that is ok to perform in a 2PC.
- */
-List *
-PostprocessAlterEnumStmt(Node *node, const char *queryString)
-{
-	/*
-	 * Before pg12 ALTER ENUM ... ADD VALUE could not be within a xact block. Normally we
-	 * would propagate the statements in a xact block to perform 2pc on changes via ddl.
-	 * Instead we need to connect directly to the workers here and execute the command.
-	 *
-	 * From pg12 and up we use the normal infrastructure and create the ddl jobs during
-	 * planning.
-	 */
-#if PG_VERSION_NUM < PG_VERSION_12
-	AlterEnumStmt *stmt = castNode(AlterEnumStmt, node);
-	ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
-	if (!ShouldPropagateObject(&typeAddress))
-	{
-		return NIL;
-	}
-
-	if (AlterEnumIsAddValue(stmt))
-	{
-		/*
-		 * ADD VALUE can't be executed in a transaction, we will execute optimistically
-		 * and on an error we will advise to fix the issue with the worker and rerun the
-		 * query with the IF NOT EXTISTS modifier. The modifier is needed as the value
-		 * might already be added to some nodes, but not all.
-		 */
-
-		/* qualification of the stmt happened during planning */
-		const char *alterEnumStmtSql = DeparseTreeNode((Node *) stmt);
-
-		List *commands = list_make2(DISABLE_DDL_PROPAGATION, (void *) alterEnumStmtSql);
-
-		int result = SendBareOptionalCommandListToAllWorkersAsUser(commands, NULL);
-
-		if (result != RESPONSE_OKAY)
-		{
-			bool oldSkipIfNewValueExists = stmt->skipIfNewValExists;
-
-			/* deparse the query with IF NOT EXISTS */
-			stmt->skipIfNewValExists = true;
-			const char *alterEnumStmtIfNotExistsSql = DeparseTreeNode((Node *) stmt);
-			stmt->skipIfNewValExists = oldSkipIfNewValueExists;
-
-			ereport(WARNING, (errmsg("not all workers applied change to enum"),
-							  errdetail("retry with: %s", alterEnumStmtIfNotExistsSql),
-							  errhint("make sure the coordinators can communicate with "
-									  "all workers")));
-		}
-	}
-#endif
-
-	return NIL;
-}
-
 /*
 * PreprocessDropTypeStmt is called for all DROP TYPE statements. For all types in the list that
 * citus has distributed to the workers it will drop the type on the workers as well. If
@@ -568,22 +568,6 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
 			PostprocessAlterExtensionCitusUpdateStmt(parsetree);
 		}

-		/*
-		 * Postgres added the following CommandCounterIncrement as a patch in:
-		 *  - 10.7 -> 10.8
-		 *  - 11.2 -> 11.3
-		 * The patch was a response to bug #15631.
-		 *
-		 * CommandCounterIncrement is used to make changes to the catalog visible for post
-		 * processing of create commands (eg. create type). It is safe to call
-		 * CommandCounterIncrement twice, as the call is a no-op if the command id is not
-		 * used yet.
-		 *
-		 * Once versions older than above are not deemed important anymore this patch can
-		 * be remove from citus.
-		 */
-		CommandCounterIncrement();
-
 		PostStandardProcessUtility(parsetree);
 	}
 	PG_CATCH();
@@ -12,9 +12,7 @@

 #include "distributed/pg_version_constants.h"

-#if PG_VERSION_NUM >= PG_VERSION_12
 #include "commands/defrem.h"
-#endif
 #include "commands/vacuum.h"
 #include "distributed/adaptive_executor.h"
 #include "distributed/commands.h"

@@ -41,10 +39,8 @@
 typedef struct CitusVacuumParams
 {
 	int options;
-#if PG_VERSION_NUM >= PG_VERSION_12
 	VacOptTernaryValue truncate;
 	VacOptTernaryValue index_cleanup;
-#endif

 #if PG_VERSION_NUM >= PG_VERSION_13
 	int nworkers;

@@ -349,11 +345,9 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
 	}

 	/* if no flags remain, exit early */
-	if (vacuumFlags == 0
-#if PG_VERSION_NUM >= PG_VERSION_12
-		&& vacuumParams.truncate == VACOPT_TERNARY_DEFAULT &&
+	if (vacuumFlags == 0 &&
+		vacuumParams.truncate == VACOPT_TERNARY_DEFAULT &&
 		vacuumParams.index_cleanup == VACOPT_TERNARY_DEFAULT
-#endif
 #if PG_VERSION_NUM >= PG_VERSION_13
 		&& vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET
 #endif

@@ -390,7 +384,6 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
 		appendStringInfoString(vacuumPrefix, "VERBOSE,");
 	}

-#if PG_VERSION_NUM >= PG_VERSION_12
 	if (vacuumFlags & VACOPT_SKIP_LOCKED)
 	{
 		appendStringInfoString(vacuumPrefix, "SKIP_LOCKED,");

@@ -411,7 +404,6 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
 							   "INDEX_CLEANUP," : "INDEX_CLEANUP false,"
 							   );
 	}
-#endif

 #if PG_VERSION_NUM >= PG_VERSION_13
 	if (vacuumParams.nworkers != VACUUM_PARALLEL_NOTSET)

@@ -496,7 +488,6 @@ ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
 /*
 * VacuumStmtParams returns a CitusVacuumParams based on the supplied VacuumStmt.
 */
-#if PG_VERSION_NUM >= PG_VERSION_12

 /*
 * This is mostly ExecVacuum from Postgres's commands/vacuum.c

@@ -611,16 +602,3 @@ VacuumStmtParams(VacuumStmt *vacstmt)
 		   (disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0);
 	return params;
 }
-
-
-#else
-static CitusVacuumParams
-VacuumStmtParams(VacuumStmt *vacuumStmt)
-{
-	CitusVacuumParams params;
-	params.options = vacuumStmt->options;
-	return params;
-}
-
-
-#endif
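After this patch the vacuum parameter struct keeps exactly one version gate: SKIP_LOCKED, INDEX_CLEANUP, and TRUNCATE are PG12 options and become unconditional, while PARALLEL remains PG13-only. A sketch of the post-patch shape, assuming PG12/13 headers (VacOptTernaryValue comes from commands/vacuum.h on those versions) and Citus's own pg_version_constants.h:

    #include "postgres.h"
    #include "commands/vacuum.h"
    #include "distributed/pg_version_constants.h"

    /*
     * With PG12 as the floor, truncate/index_cleanup no longer need a guard;
     * only the PG13 PARALLEL option keeps its version gate.
     */
    typedef struct CitusVacuumParams
    {
        int options;
        VacOptTernaryValue truncate;
        VacOptTernaryValue index_cleanup;
    #if PG_VERSION_NUM >= PG_VERSION_13
        int nworkers;
    #endif
    } CitusVacuumParams;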
@@ -125,22 +125,17 @@ get_extension_schema(Oid ext_oid)
 	/* *INDENT-OFF* */
 	Oid result;
 	Relation rel;
-	SysScanDesc scandesc;
 	HeapTuple tuple;
 	ScanKeyData entry[1];

 	rel = table_open(ExtensionRelationId, AccessShareLock);

 	ScanKeyInit(&entry[0],
-#if PG_VERSION_NUM >= PG_VERSION_12
 				Anum_pg_extension_oid,
-#else
-				ObjectIdAttributeNumber,
-#endif
 				BTEqualStrategyNumber, F_OIDEQ,
 				ObjectIdGetDatum(ext_oid));

-	scandesc = systable_beginscan(rel, ExtensionOidIndexId, true,
-								  NULL, 1, entry);
+	SysScanDesc scandesc = systable_beginscan(rel, ExtensionOidIndexId, true,
+											  NULL, 1, entry);

 	tuple = systable_getnext(scandesc);

@@ -251,7 +246,6 @@ char *
 pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults,
 							 char *accessMethod)
 {
-	char relationKind = 0;
 	bool firstAttributePrinted = false;
 	AttrNumber defaultValueIndex = 0;
 	AttrNumber constraintIndex = 0;

@@ -361,7 +355,6 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults,
 			defaultString = deparse_expression(defaultNode, defaultContext,
 											   false, false);

-#if PG_VERSION_NUM >= PG_VERSION_12
 			if (attributeForm->attgenerated == ATTRIBUTE_GENERATED_STORED)
 			{
 				appendStringInfo(&buffer, " GENERATED ALWAYS AS (%s) STORED",

@@ -371,9 +364,6 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults,
 			{
 				appendStringInfo(&buffer, " DEFAULT %s", defaultString);
 			}
-#else
-			appendStringInfo(&buffer, " DEFAULT %s", defaultString);
-#endif
 		}
 	}

@@ -436,7 +426,7 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults,
 	 * If the relation is a foreign table, append the server name and options to
 	 * the create table statement.
 	 */
-	relationKind = relation->rd_rel->relkind;
+	char relationKind = relation->rd_rel->relkind;
 	if (relationKind == RELKIND_FOREIGN_TABLE)
 	{
 		ForeignTable *foreignTable = GetForeignTable(tableRelationId);

@@ -452,8 +442,6 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults,
 		appendStringInfo(&buffer, " PARTITION BY %s ", partitioningInformation);
 	}

-#if PG_VERSION_NUM >= 120000
-
 	/*
 	 * Add table access methods for pg12 and higher when the table is configured with an
 	 * access method

@@ -475,7 +463,6 @@ pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults,
 		appendStringInfo(&buffer, " USING %s", quote_identifier(NameStr(amForm->amname)));
 		ReleaseSysCache(amTup);
 	}
-#endif

 	/*
 	 * Add any reloptions (storage parameters) defined on the table in a WITH

@@ -745,11 +732,7 @@ deparse_shard_reindex_statement(ReindexStmt *origStmt, Oid distrelid, int64 shar
 {
 	ReindexStmt *reindexStmt = copyObject(origStmt); /* copy to avoid modifications */
 	char *relationName = NULL;
-#if PG_VERSION_NUM >= PG_VERSION_12
 	const char *concurrentlyString = reindexStmt->concurrent ? "CONCURRENTLY " : "";
-#else
-	const char *concurrentlyString = "";
-#endif

 	if (reindexStmt->kind == REINDEX_OBJECT_INDEX ||
File diff suppressed because it is too large.
@@ -36,11 +36,7 @@
 #include "distributed/worker_protocol.h"
 #include "executor/executor.h"
 #include "nodes/makefuncs.h"
-#if PG_VERSION_NUM >= PG_VERSION_12
 #include "optimizer/optimizer.h"
-#else
-#include "optimizer/planner.h"
-#endif
 #include "optimizer/clauses.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"

@@ -157,7 +153,6 @@ CitusBeginScan(CustomScanState *node, EState *estate, int eflags)
 	 */
 	EnableWorkerMessagePropagation();

-#if PG_VERSION_NUM >= PG_VERSION_12

 	/*
 	 * Since we are using a tuplestore we cannot use the virtual tuples postgres had

@@ -176,7 +171,6 @@ CitusBeginScan(CustomScanState *node, EState *estate, int eflags)
 	ExecAssignScanProjectionInfoWithVarno(&node->ss, INDEX_VAR);

 	node->ss.ps.qual = ExecInitQual(node->ss.ps.plan->qual, (PlanState *) node);
-#endif

 	DistributedPlan *distributedPlan = scanState->distributedPlan;
 	if (distributedPlan->insertSelectQuery != NULL)
@@ -272,14 +272,9 @@ WrapTasksForPartitioning(const char *resultIdPrefix, List *selectTaskList,
 static PartitioningTupleDest *
 CreatePartitioningTupleDest(CitusTableCacheEntry *targetRelation)
 {
-	TupleDesc tupleDescriptor = NULL;
 	int resultColumnCount = 3;

-#if PG_VERSION_NUM >= PG_VERSION_12
-	tupleDescriptor = CreateTemplateTupleDesc(resultColumnCount);
-#else
-	tupleDescriptor = CreateTemplateTupleDesc(resultColumnCount, false);
-#endif
+	TupleDesc tupleDescriptor = CreateTemplateTupleDesc(resultColumnCount);

 	TupleDescInitEntry(tupleDescriptor, (AttrNumber) 1, "partition_index",
 					   INT4OID, -1, 0);

@@ -686,14 +681,9 @@ QueryStringForFragmentsTransfer(NodeToNodeFragmentsTransfer *fragmentsTransfer)
 static void
 ExecuteFetchTaskList(List *taskList)
 {
-	TupleDesc resultDescriptor = NULL;
 	int resultColumnCount = 1;

-#if PG_VERSION_NUM >= PG_VERSION_12
-	resultDescriptor = CreateTemplateTupleDesc(resultColumnCount);
-#else
-	resultDescriptor = CreateTemplateTupleDesc(resultColumnCount, false);
-#endif
+	TupleDesc resultDescriptor = CreateTemplateTupleDesc(resultColumnCount);

 	TupleDescInitEntry(resultDescriptor, (AttrNumber) 1, "byte_count", INT8OID, -1, 0);
@@ -100,11 +100,7 @@
 #include "distributed/worker_protocol.h"
 #include "executor/tstoreReceiver.h"
 #include "executor/tuptable.h"
-#if PG_VERSION_NUM >= PG_VERSION_12
 #include "optimizer/optimizer.h"
-#else
-#include "optimizer/planner.h"
-#endif
 #include "nodes/params.h"
 #include "utils/snapmgr.h"
@@ -181,9 +181,6 @@ CitusExecutorRun(QueryDesc *queryDesc,
 		EState *estate = queryDesc->estate;

 		estate->es_processed = 0;
-#if PG_VERSION_NUM < PG_VERSION_12
-		estate->es_lastoid = InvalidOid;
-#endif

 		/* start and shutdown tuple receiver to simulate empty result */
 		dest->rStartup(queryDesc->dest, CMD_SELECT, queryDesc->tupDesc);
@@ -280,11 +280,7 @@ TupleDestDestReceiverReceive(TupleTableSlot *slot,
 	Assert(task->queryCount == 1);
 	int queryNumber = 0;

-#if PG_VERSION_NUM >= PG_VERSION_12
 	HeapTuple heapTuple = ExecFetchSlotHeapTuple(slot, true, NULL);
-#else
-	HeapTuple heapTuple = ExecFetchSlotTuple(slot);
-#endif

 	uint64 tupleLibpqSize = 0;
|
|
@ -548,7 +548,6 @@ SupportedDependencyByCitus(const ObjectAddress *address)
|
|||
*/
|
||||
switch (getObjectClass(address))
|
||||
{
|
||||
#if PG_VERSION_NUM >= 120000
|
||||
case OCLASS_AM:
|
||||
{
|
||||
/*
|
||||
|
@ -559,7 +558,6 @@ SupportedDependencyByCitus(const ObjectAddress *address)
|
|||
*/
|
||||
return IsObjectAddressOwnedByExtension(address, NULL);
|
||||
}
|
||||
#endif
|
||||
|
||||
case OCLASS_COLLATION:
|
||||
case OCLASS_SCHEMA:
|
||||
|
@ -1188,17 +1186,10 @@ GetDependingView(Form_pg_depend pg_depend)
|
|||
Relation rewriteRel = table_open(RewriteRelationId, AccessShareLock);
|
||||
ScanKeyData rkey[1];
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_12
|
||||
ScanKeyInit(&rkey[0],
|
||||
Anum_pg_rewrite_oid,
|
||||
BTEqualStrategyNumber, F_OIDEQ,
|
||||
ObjectIdGetDatum(pg_depend->objid));
|
||||
#else
|
||||
ScanKeyInit(&rkey[0],
|
||||
ObjectIdAttributeNumber,
|
||||
BTEqualStrategyNumber, F_OIDEQ,
|
||||
ObjectIdGetDatum(pg_depend->objid));
|
||||
#endif
|
||||
|
||||
SysScanDesc rscan = systable_beginscan(rewriteRel, RewriteOidIndexId,
|
||||
true, NULL, 1, rkey);
|
||||
|
|
|
@ -114,15 +114,11 @@ ObjectExists(const ObjectAddress *address)
|
|||
|
||||
if (is_objectclass_supported(address->classId))
|
||||
{
|
||||
HeapTuple objtup;
|
||||
Relation catalog = table_open(address->classId, AccessShareLock);
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_12
|
||||
objtup = get_catalog_object_by_oid(catalog, get_object_attnum_oid(
|
||||
address->classId), address->objectId);
|
||||
#else
|
||||
objtup = get_catalog_object_by_oid(catalog, address->objectId);
|
||||
#endif
|
||||
HeapTuple objtup = get_catalog_object_by_oid(catalog, get_object_attnum_oid(
|
||||
address->classId),
|
||||
address->objectId);
|
||||
table_close(catalog, AccessShareLock);
|
||||
if (objtup != NULL)
|
||||
{
|
||||
|
|
|
@ -2552,7 +2552,6 @@ LookupTypeOid(char *schemaNameSting, char *typeNameString)
|
|||
List *qualifiedName = list_make2(schemaName, typeName);
|
||||
TypeName *enumTypeName = makeTypeNameFromNameList(qualifiedName);
|
||||
|
||||
Oid nodeRoleTypId;
|
||||
|
||||
/* typenameTypeId but instead of raising an error return InvalidOid */
|
||||
Type tup = LookupTypeName(NULL, enumTypeName, NULL, false);
|
||||
|
@ -2561,11 +2560,7 @@ LookupTypeOid(char *schemaNameSting, char *typeNameString)
|
|||
return InvalidOid;
|
||||
}
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_12
|
||||
nodeRoleTypId = ((Form_pg_type) GETSTRUCT(tup))->oid;
|
||||
#else
|
||||
nodeRoleTypId = HeapTupleGetOid(tup);
|
||||
#endif
|
||||
Oid nodeRoleTypId = ((Form_pg_type) GETSTRUCT(tup))->oid;
|
||||
ReleaseSysCache(tup);
|
||||
|
||||
return nodeRoleTypId;
|
||||
|
|
|
@ -17,9 +17,7 @@
|
|||
|
||||
#include "distributed/pg_version_constants.h"
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_12
|
||||
#include "access/genam.h"
|
||||
#endif
|
||||
#include "access/htup_details.h"
|
||||
#include "access/sysattr.h"
|
||||
#include "access/xact.h"
|
||||
|
|
|
@ -48,21 +48,14 @@
|
|||
#include "distributed/worker_protocol.h"
|
||||
#include "distributed/worker_transaction.h"
|
||||
#include "lib/stringinfo.h"
|
||||
#if PG_VERSION_NUM >= PG_VERSION_12
|
||||
#include "nodes/nodeFuncs.h"
|
||||
#endif
|
||||
#include "nodes/nodes.h"
|
||||
#include "nodes/parsenodes.h"
|
||||
#include "nodes/pg_list.h"
|
||||
#include "nodes/primnodes.h"
|
||||
#include "optimizer/clauses.h"
|
||||
#if PG_VERSION_NUM >= PG_VERSION_12
|
||||
#include "nodes/pathnodes.h"
|
||||
#include "optimizer/optimizer.h"
|
||||
#else
|
||||
#include "nodes/relation.h"
|
||||
#include "optimizer/predtest.h"
|
||||
#endif
|
||||
#include "optimizer/restrictinfo.h"
|
||||
#include "storage/lock.h"
|
||||
#include "storage/lmgr.h"
|
||||
|
|
|
@ -48,12 +48,7 @@
|
|||
#include "distributed/worker_protocol.h"
|
||||
#include "distributed/worker_transaction.h"
|
||||
#include "optimizer/clauses.h"
|
||||
#if PG_VERSION_NUM >= PG_VERSION_12
|
||||
#include "optimizer/optimizer.h"
|
||||
#else
|
||||
#include "optimizer/predtest.h"
|
||||
#include "optimizer/var.h"
|
||||
#endif
|
||||
#include "optimizer/restrictinfo.h"
|
||||
#include "nodes/makefuncs.h"
|
||||
#include "tcop/tcopprot.h"
|
||||
|
|
|
@@ -451,15 +451,12 @@ Datum
citus_get_active_worker_nodes(PG_FUNCTION_ARGS)
{
	FuncCallContext *functionContext = NULL;
	uint32 workerNodeIndex = 0;
	uint32 workerNodeCount = 0;

	CheckCitusVersion(ERROR);

	if (SRF_IS_FIRSTCALL())
	{
		TupleDesc tupleDescriptor = NULL;

		/* create a function context for cross-call persistence */
		functionContext = SRF_FIRSTCALL_INIT();

@@ -477,11 +474,7 @@ citus_get_active_worker_nodes(PG_FUNCTION_ARGS)
		 * This tuple descriptor must match the output parameters declared for
		 * the function in pg_proc.
		 */
#if PG_VERSION_NUM < PG_VERSION_12
		tupleDescriptor = CreateTemplateTupleDesc(WORKER_NODE_FIELDS, false);
#else
		tupleDescriptor = CreateTemplateTupleDesc(WORKER_NODE_FIELDS);
#endif
		TupleDesc tupleDescriptor = CreateTemplateTupleDesc(WORKER_NODE_FIELDS);
		TupleDescInitEntry(tupleDescriptor, (AttrNumber) 1, "node_name",
						   TEXTOID, -1, 0);
		TupleDescInitEntry(tupleDescriptor, (AttrNumber) 2, "node_port",

@@ -493,7 +486,7 @@ citus_get_active_worker_nodes(PG_FUNCTION_ARGS)
	}

	functionContext = SRF_PERCALL_SETUP();
	workerNodeIndex = functionContext->call_cntr;
	uint32 workerNodeIndex = functionContext->call_cntr;
	workerNodeCount = functionContext->max_calls;

	if (workerNodeIndex < workerNodeCount)

@@ -668,7 +661,6 @@ GetPreLoadTableCreationCommands(Oid relationId, bool includeSequenceDefaults,
										 tableColumnOptionsDef));
	}

#if PG_VERSION_NUM >= 120000

	/* add columnar options for cstore tables */
	if (accessMethod == NULL && IsColumnarTableAmTable(relationId))

@@ -679,7 +671,6 @@ GetPreLoadTableCreationCommands(Oid relationId, bool includeSequenceDefaults,
			tableDDLEventList = lappend(tableDDLEventList, cstoreOptionsDDL);
		}
	}
#endif

	char *tableOwnerDef = TableOwnerResetCommand(relationId);
	if (tableOwnerDef != NULL)

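For reference, the PG 12-only shape the tuple-descriptor hunks above converge on can be sketched as follows; the helper name and the INT8OID type for node_port are illustrative assumptions, not taken from the diff. CreateTemplateTupleDesc() dropped its hasoid parameter in PG 12, which is the only reason these #if branches existed:

	#include "postgres.h"
	#include "funcapi.h"
	#include "catalog/pg_type.h"

	/* Build and bless a two-column descriptor the PG 12+ way; note that
	 * CreateTemplateTupleDesc() no longer takes a hasoid argument. */
	static TupleDesc
	BuildWorkerNodeTupleDesc(void)
	{
		TupleDesc tupleDescriptor = CreateTemplateTupleDesc(2);

		TupleDescInitEntry(tupleDescriptor, (AttrNumber) 1, "node_name",
						   TEXTOID, -1, 0);
		/* INT8OID is an assumption made for this sketch */
		TupleDescInitEntry(tupleDescriptor, (AttrNumber) 2, "node_port",
						   INT8OID, -1, 0);

		return BlessTupleDesc(tupleDescriptor);
	}
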
@@ -346,11 +346,7 @@ FindCitusExtradataContainerRTE(Node *node, RangeTblEntry **result)
	}
	else if (IsA(node, Query))
	{
#if PG_VERSION_NUM >= PG_VERSION_12
		const int flags = QTW_EXAMINE_RTES_BEFORE;
#else
		const int flags = QTW_EXAMINE_RTES;
#endif
		return query_tree_walker((Query *) node, FindCitusExtradataContainerRTE, result,
								 flags);
	}

@@ -7,8 +7,6 @@
 * planning, the query pushdown planning, kicks in and the CTEs can actually
 * be pushed down as long as it is safe to pushdown as a subquery.
 *
 * Most of the logic in this function is inspired (and some is copy & pasted)
 * from PostgreSQL 12's CTE inlining feature.
 *
 * Copyright (c) Citus Data, Inc.
 *-------------------------------------------------------------------------

@@ -18,29 +16,9 @@

#include "distributed/cte_inline.h"
#include "nodes/nodeFuncs.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/cost.h"
#include "optimizer/clauses.h"
#endif
#include "rewrite/rewriteManip.h"

#if PG_VERSION_NUM < PG_VERSION_12

/* copy & paste from PG 12 */
#define PG_12_QTW_EXAMINE_RTES_BEFORE 0x10
#define PG_12_QTW_EXAMINE_RTES_AFTER 0x20
bool pg_12_query_tree_walker(Query *query,
							 bool (*walker)(),
							 void *context,
							 int flags);
bool pg_12_range_table_walker(List *rtable,
							  bool (*walker)(),
							  void *context,
							  int flags);
#endif

typedef struct inline_cte_walker_context
{
	const char *ctename; /* name and relative level of target CTE */

@@ -233,18 +211,9 @@ PostgreSQLCTEInlineCondition(CommonTableExpr *cte, CmdType cmdType)
	 * will be inlined even if multiply referenced.
	 */
	if (
#if PG_VERSION_NUM >= PG_VERSION_12
		(cte->ctematerialized == CTEMaterializeNever ||
		 (cte->ctematerialized == CTEMaterializeDefault &&
		  cte->cterefcount == 1)) &&
#else

		/*
		 * If referenced only once inlining would probably perform
		 * better, so for pg < 12, try inlining
		 */
		cte->cterefcount == 1 &&
#endif
		!cte->cterecursive &&
		cmdType == CMD_SELECT &&
		!contain_dml(cte->ctequery) &&

@@ -294,18 +263,8 @@ inline_cte_walker(Node *node, inline_cte_walker_context *context)

		context->levelsup++;

		/*
		 * Visit the query's RTE nodes after their contents; otherwise
		 * query_tree_walker would descend into the newly inlined CTE query,
		 * which we don't want.
		 */
#if PG_VERSION_NUM < PG_VERSION_12
		(void) pg_12_query_tree_walker(query, inline_cte_walker, context,
									   PG_12_QTW_EXAMINE_RTES_AFTER);
#else
		(void) query_tree_walker(query, inline_cte_walker, context,
								 QTW_EXAMINE_RTES_AFTER);
#endif
		context->levelsup--;

		return false;

@@ -411,123 +370,4 @@ contain_dml_walker(Node *node, void *context)
	return expression_tree_walker(node, contain_dml_walker, context);
}


#if PG_VERSION_NUM < PG_VERSION_12

/*
 * pg_12_query_tree_walker is copied from Postgres 12's source code. The only
 * difference from query_tree_walker is the two new flags added in
 * range_table_walker: QTW_EXAMINE_RTES_AFTER and QTW_EXAMINE_RTES_BEFORE.
 */
bool
pg_12_query_tree_walker(Query *query,
						bool (*walker) (),
						void *context,
						int flags)
{
	Assert(query != NULL && IsA(query, Query));

	if (walker((Node *) query->targetList, context))
		return true;
	if (walker((Node *) query->withCheckOptions, context))
		return true;
	if (walker((Node *) query->onConflict, context))
		return true;
	if (walker((Node *) query->returningList, context))
		return true;
	if (walker((Node *) query->jointree, context))
		return true;
	if (walker(query->setOperations, context))
		return true;
	if (walker(query->havingQual, context))
		return true;
	if (walker(query->limitOffset, context))
		return true;
	if (walker(query->limitCount, context))
		return true;
	if (!(flags & QTW_IGNORE_CTE_SUBQUERIES))
	{
		if (walker((Node *) query->cteList, context))
			return true;
	}
	if (!(flags & QTW_IGNORE_RANGE_TABLE))
	{
		if (pg_12_range_table_walker(query->rtable, walker, context, flags))
			return true;
	}
	return false;
}

/*
 * pg_12_range_table_walker is copied from Postgres 12's source code. The only
 * difference from range_table_walker is the two new flags:
 * QTW_EXAMINE_RTES_AFTER and QTW_EXAMINE_RTES_BEFORE.
 */
bool
pg_12_range_table_walker(List *rtable,
						 bool (*walker) (),
						 void *context,
						 int flags)
{
	ListCell *rt;

	foreach(rt, rtable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(rt);

		/*
		 * Walkers might need to examine the RTE node itself either before or
		 * after visiting its contents (or, conceivably, both). Note that if
		 * you specify neither flag, the walker won't visit the RTE at all.
		 */
		if (flags & PG_12_QTW_EXAMINE_RTES_BEFORE)
			if (walker(rte, context))
				return true;

		switch (rte->rtekind)
		{
			case RTE_RELATION:
				if (walker(rte->tablesample, context))
					return true;
				break;
			case RTE_CTE:
			case RTE_NAMEDTUPLESTORE:
				/* nothing to do */
				break;
			case RTE_SUBQUERY:
				if (!(flags & QTW_IGNORE_RT_SUBQUERIES))
					if (walker(rte->subquery, context))
						return true;
				break;
			case RTE_JOIN:
				if (!(flags & QTW_IGNORE_JOINALIASES))
					if (walker(rte->joinaliasvars, context))
						return true;
				break;
			case RTE_FUNCTION:
				if (walker(rte->functions, context))
					return true;
				break;
			case RTE_TABLEFUNC:
				if (walker(rte->tablefunc, context))
					return true;
				break;
			case RTE_VALUES:
				if (walker(rte->values_lists, context))
					return true;
				break;
		}

		if (walker(rte->securityQuals, context))
			return true;

		if (flags & PG_12_QTW_EXAMINE_RTES_AFTER)
			if (walker(rte, context))
				return true;
	}
	return false;
}
#endif

/* *INDENT-ON* */

@@ -52,12 +52,8 @@
#include "nodes/pg_list.h"
#include "parser/parsetree.h"
#include "parser/parse_type.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#include "optimizer/plancat.h"
#else
#include "optimizer/cost.h"
#endif
#include "optimizer/pathnode.h"
#include "optimizer/planner.h"
#include "optimizer/planmain.h"

@@ -751,24 +747,6 @@ InlineCtesAndCreateDistributedPlannedStmt(uint64 planId,
	/* after inlining, we shouldn't have any inlinable CTEs */
	Assert(!QueryTreeContainsInlinableCTE(copyOfOriginalQuery));

#if PG_VERSION_NUM < PG_VERSION_12
	Query *query = planContext->query;

	/*
	 * We had to implement this hack because on Postgres11 and below, the originalQuery
	 * and the query would have significant differences in terms of CTEs where CTEs
	 * would not be inlined on the query (as standard_planner() wouldn't inline CTEs
	 * on PG 11 and below).
	 *
	 * Instead, we prefer to pass the inlined query to the distributed planning. We rely
	 * on the fact that the query includes subqueries, and it'd definitely go through
	 * query pushdown planning. During query pushdown planning, the only relevant query
	 * tree is the original query.
	 */
	planContext->query = copyObject(copyOfOriginalQuery);
#endif


	/* simply recurse into CreateDistributedPlannedStmt() in a PG_TRY() block */
	PlannedStmt *result = TryCreateDistributedPlannedStmt(planContext->plan,
														  copyOfOriginalQuery,

@@ -777,15 +755,6 @@ InlineCtesAndCreateDistributedPlannedStmt(uint64 planId,
														  planContext->
														  plannerRestrictionContext);

#if PG_VERSION_NUM < PG_VERSION_12

	/*
	 * Set back the original query, in case the planning failed and we need to go
	 * into distributed planning again.
	 */
	planContext->query = query;
#endif

	return result;
}

@@ -1509,15 +1478,10 @@ BlessRecordExpression(Expr *expr)
		 * Handle row expressions, e.g. SELECT (1,2);
		 */
		RowExpr *rowExpr = (RowExpr *) expr;
		TupleDesc rowTupleDesc = NULL;
		ListCell *argCell = NULL;
		int currentResno = 1;

#if PG_VERSION_NUM >= PG_VERSION_12
		rowTupleDesc = CreateTemplateTupleDesc(list_length(rowExpr->args));
#else
		rowTupleDesc = CreateTemplateTupleDesc(list_length(rowExpr->args), false);
#endif
		TupleDesc rowTupleDesc = CreateTemplateTupleDesc(list_length(rowExpr->args));

		foreach(argCell, rowExpr->args)
		{

@@ -2015,16 +1979,11 @@ AdjustReadIntermediateResultsCostInternal(RelOptInfo *relOptInfo, List *columnTy
{
	PathTarget *reltarget = relOptInfo->reltarget;
	List *pathList = relOptInfo->pathlist;
	Path *path = NULL;
	double rowCost = 0.;
	double rowSizeEstimate = 0;
	double rowCountEstimate = 0.;
	double ioCost = 0.;
#if PG_VERSION_NUM >= PG_VERSION_12
	QualCost funcCost = { 0., 0. };
#else
	double funcCost = 0.;
#endif
	int64 totalResultSize = 0;
	ListCell *typeCell = NULL;


@@ -2083,17 +2042,9 @@ AdjustReadIntermediateResultsCostInternal(RelOptInfo *relOptInfo, List *columnTy


		/* add the cost of parsing a column */
#if PG_VERSION_NUM >= PG_VERSION_12
		add_function_cost(NULL, inputFunctionId, NULL, &funcCost);
#else
		funcCost += get_func_cost(inputFunctionId);
#endif
	}
#if PG_VERSION_NUM >= PG_VERSION_12
	rowCost += funcCost.per_tuple;
#else
	rowCost += funcCost * cpu_operator_cost;
#endif

	/* estimate the number of rows based on the file size and estimated row size */
	rowCountEstimate = Max(1, (double) totalResultSize / rowSizeEstimate);

@@ -2104,13 +2055,11 @@ AdjustReadIntermediateResultsCostInternal(RelOptInfo *relOptInfo, List *columnTy
	Assert(pathList != NIL);

	/* tell the planner about the cost and row count of the function */
	path = (Path *) linitial(pathList);
	Path *path = (Path *) linitial(pathList);
	path->rows = rowCountEstimate;
	path->total_cost = rowCountEstimate * rowCost + ioCost;

#if PG_VERSION_NUM >= PG_VERSION_12
	path->startup_cost = funcCost.startup + relOptInfo->baserestrictcost.startup;
#endif
}

@@ -16,11 +16,7 @@
#include "distributed/metadata_cache.h"
#include "distributed/multi_logical_optimizer.h"
#include "distributed/pg_dist_partition.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/var.h"
#endif
#include "optimizer/restrictinfo.h"
#include "nodes/nodeFuncs.h"
#include "nodes/pg_list.h"

@@ -44,17 +44,11 @@
#include "distributed/pg_dist_partition.h"
#include "distributed/shardinterval_utils.h"
#include "distributed/shard_pruning.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/makefuncs.h"
#endif
#include "nodes/nodeFuncs.h"
#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/clauses.h"
#endif
#include "tcop/pquery.h"

bool EnableFastPathRouterPlanner = true;

@@ -43,9 +43,7 @@
#include "nodes/primnodes.h"
#include "optimizer/clauses.h"
#include "parser/parse_coerce.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "parser/parsetree.h"
#endif
#include "miscadmin.h"
#include "tcop/dest.h"
#include "utils/lsyscache.h"

@@ -102,18 +100,8 @@ contain_param_walker(Node *node, void *context)
PlannedStmt *
TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
{
	List *targetList = NIL;
	TargetEntry *targetEntry = NULL;
	FuncExpr *funcExpr = NULL;
	DistObjectCacheEntry *procedure = NULL;
	Oid colocatedRelationId = InvalidOid;
	bool colocatedWithReferenceTable = false;
	CitusTableCacheEntry *distTable = NULL;
	Var *partitionColumn = NULL;
	ShardPlacement *placement = NULL;
	WorkerNode *workerNode = NULL;
	Task *task = NULL;
	Job *job = NULL;
	DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);
	struct ParamWalkerContext walkerParamContext = { 0 };


@@ -157,8 +145,6 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)

	if (joinTree->fromlist != NIL)
	{
#if PG_VERSION_NUM >= PG_VERSION_12

		/*
		 * In pg12's planning phase empty FROMs are represented with an RTE_RESULT.
		 * When we arrive here, standard_planner has already been called which calls

@@ -194,29 +180,25 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
			Assert(list_length(joinTree->fromlist) > 1);
			return NULL;
		}
#else

		/* query has a FROM section */
		return NULL;
#endif
	}

	targetList = planContext->query->targetList;
	List *targetList = planContext->query->targetList;
	if (list_length(planContext->query->targetList) != 1)
	{
		/* multiple target list items */
		return NULL;
	}

	targetEntry = (TargetEntry *) linitial(targetList);
	TargetEntry *targetEntry = (TargetEntry *) linitial(targetList);
	if (!IsA(targetEntry->expr, FuncExpr))
	{
		/* target list item is not a function call */
		return NULL;
	}

	funcExpr = (FuncExpr *) targetEntry->expr;
	procedure = LookupDistObjectCacheEntry(ProcedureRelationId, funcExpr->funcid, 0);
	FuncExpr *funcExpr = (FuncExpr *) targetEntry->expr;
	DistObjectCacheEntry *procedure = LookupDistObjectCacheEntry(ProcedureRelationId,
																 funcExpr->funcid, 0);
	if (procedure == NULL || !procedure->isDistributed)
	{
		/* not a distributed function call */

@@ -252,15 +234,15 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
		return NULL;
	}

	colocatedRelationId = ColocatedTableId(procedure->colocationId);
	Oid colocatedRelationId = ColocatedTableId(procedure->colocationId);
	if (colocatedRelationId == InvalidOid)
	{
		ereport(DEBUG1, (errmsg("function does not have co-located tables")));
		return NULL;
	}

	distTable = GetCitusTableCacheEntry(colocatedRelationId);
	partitionColumn = distTable->partitionColumn;
	CitusTableCacheEntry *distTable = GetCitusTableCacheEntry(colocatedRelationId);
	Var *partitionColumn = distTable->partitionColumn;
	if (partitionColumn == NULL)
	{
		colocatedWithReferenceTable = true;

@@ -295,7 +277,7 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
		return false;
	}

	workerNode = FindWorkerNode(placement->nodeName, placement->nodePort);
	WorkerNode *workerNode = FindWorkerNode(placement->nodeName, placement->nodePort);

	if (workerNode == NULL || !workerNode->hasMetadata || !workerNode->metadataSynced)
	{

@@ -334,14 +316,14 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)

	ereport(DEBUG1, (errmsg("pushing down the function call")));

	task = CitusMakeNode(Task);
	Task *task = CitusMakeNode(Task);
	task->taskType = READ_TASK;
	task->taskPlacementList = list_make1(placement);
	SetTaskQueryIfShouldLazyDeparse(task, planContext->query);
	task->anchorShardId = placement->shardId;
	task->replicationModel = distTable->replicationModel;

	job = CitusMakeNode(Job);
	Job *job = CitusMakeNode(Job);
	job->jobId = UniqueJobId();
	job->jobQuery = planContext->query;
	job->taskList = list_make1(task);

@@ -40,11 +40,7 @@
#include "optimizer/planner.h"
#include "optimizer/restrictinfo.h"
#include "optimizer/tlist.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/var.h"
#endif
#include "parser/parsetree.h"
#include "parser/parse_coerce.h"
#include "parser/parse_relation.h"

@@ -103,11 +103,7 @@
#include "distributed/version_compat.h"
#include "lib/stringinfo.h"
#include "optimizer/clauses.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/var.h"
#endif
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "parser/parsetree.h"

@@ -117,11 +113,7 @@
#include "nodes/nodeFuncs.h"
#include "nodes/pg_list.h"
#include "nodes/primnodes.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#else
#include "nodes/relation.h"
#endif
#include "utils/builtins.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"

@@ -18,11 +18,7 @@
#include "distributed/citus_ruleutils.h"
#include "distributed/metadata_cache.h"
#include "distributed/version_compat.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/planner.h"
#endif
#include "optimizer/clauses.h"


@@ -1273,11 +1273,7 @@ CreateExplainAnlyzeDestination(Task *task, TupleDestination *taskDest)
	tupleDestination->originalTask = task;
	tupleDestination->originalTaskDestination = taskDest;

#if PG_VERSION_NUM >= PG_VERSION_12
	TupleDesc lastSavedExplainAnalyzeTupDesc = CreateTemplateTupleDesc(2);
#else
	TupleDesc lastSavedExplainAnalyzeTupDesc = CreateTemplateTupleDesc(2, false);
#endif

	TupleDescInitEntry(lastSavedExplainAnalyzeTupDesc, 1, "explain analyze", TEXTOID, 0,
					   0);

@@ -28,11 +28,7 @@
#include "distributed/pg_dist_partition.h"
#include "distributed/worker_protocol.h"
#include "lib/stringinfo.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/var.h"
#endif
#include "utils/builtins.h"
#include "nodes/nodeFuncs.h"
#include "utils/builtins.h"

@@ -47,11 +47,7 @@
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/tlist.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/var.h"
#endif
#include "parser/parse_agg.h"
#include "parser/parse_coerce.h"
#include "parser/parse_oper.h"

@@ -3608,11 +3604,7 @@ AggregateFunctionOid(const char *functionName, Oid inputType)
		if (procForm->proargtypes.values[0] == inputType ||
			procForm->proargtypes.values[0] == ANYELEMENTOID)
		{
#if PG_VERSION_NUM < PG_VERSION_12
			functionOid = HeapTupleGetOid(heapTuple);
#else
			functionOid = procForm->oid;
#endif
			break;
		}
	}

@@ -38,13 +38,8 @@
#include "distributed/version_compat.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#include "optimizer/optimizer.h"
#else
#include "nodes/relation.h"
#include "optimizer/var.h"
#endif
#include "optimizer/clauses.h"
#include "optimizer/prep.h"
#include "optimizer/tlist.h"

@@ -61,13 +61,8 @@
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#include "optimizer/optimizer.h"
#else
#include "nodes/relation.h"
#include "optimizer/var.h"
#endif
#include "optimizer/restrictinfo.h"
#include "optimizer/tlist.h"
#include "parser/parse_relation.h"

@@ -65,12 +65,7 @@
#include "optimizer/joininfo.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/var.h"
#include "optimizer/predtest.h"
#endif
#include "optimizer/restrictinfo.h"
#include "parser/parsetree.h"
#include "parser/parse_oper.h"

@@ -967,10 +962,8 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer

			queryTableCount++;
		}
		else if (rangeTableEntry->rtekind == RTE_VALUES
#if PG_VERSION_NUM >= PG_VERSION_12
				 || rangeTableEntry->rtekind == RTE_RESULT
#endif
		else if (rangeTableEntry->rtekind == RTE_VALUES ||
				 rangeTableEntry->rtekind == RTE_RESULT
				 )
		{
			/* do nothing, this type is supported */

@@ -23,11 +23,7 @@

#include "distributed/pg_version_constants.h"

#if PG_VERSION_NUM >= PG_VERSION_12
#include "access/relation.h"
#else
#include "access/heapam.h"
#endif
#include "distributed/multi_logical_planner.h"
#include "distributed/query_colocation_checker.h"
#include "distributed/pg_dist_partition.h"

@@ -38,12 +38,8 @@
#include "distributed/relation_restriction_equivalence.h"
#include "distributed/version_compat.h"
#include "nodes/nodeFuncs.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/makefuncs.h"
#include "optimizer/optimizer.h"
#else
#include "optimizer/var.h"
#endif
#include "nodes/pg_list.h"
#include "optimizer/clauses.h"
#include "parser/parsetree.h"

@@ -235,8 +231,6 @@ HasEmptyJoinTree(Query *query)
	{
		return true;
	}

#if PG_VERSION_NUM >= PG_VERSION_12
	else if (list_length(query->rtable) == 1)
	{
		RangeTblEntry *rte = (RangeTblEntry *) linitial(query->rtable);

@@ -245,7 +239,6 @@ HasEmptyJoinTree(Query *query)
			return true;
		}
	}
#endif

	return false;
}

@@ -1153,10 +1146,8 @@ DeferErrorIfUnsupportedTableCombination(Query *queryTree)
		 * subquery, or immutable function.
		 */
		if (rangeTableEntry->rtekind == RTE_RELATION ||
			rangeTableEntry->rtekind == RTE_SUBQUERY
#if PG_VERSION_NUM >= PG_VERSION_12
			|| rangeTableEntry->rtekind == RTE_RESULT
#endif
			rangeTableEntry->rtekind == RTE_SUBQUERY ||
			rangeTableEntry->rtekind == RTE_RESULT
			)
		{
			/* accepted */

@@ -1472,13 +1463,11 @@ HasRecurringTuples(Node *node, RecurringTuplesType *recurType)
			 */
			return true;
		}
#if PG_VERSION_NUM >= PG_VERSION_12
		else if (rangeTableEntry->rtekind == RTE_RESULT)
		{
			*recurType = RECURRING_TUPLES_EMPTY_JOIN_TREE;
			return true;
		}
#endif

		return false;
	}

@@ -77,11 +77,7 @@
#include "distributed/version_compat.h"
#include "lib/stringinfo.h"
#include "optimizer/clauses.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
#include "optimizer/var.h"
#endif
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "parser/parsetree.h"

@@ -91,11 +87,7 @@
#include "nodes/nodeFuncs.h"
#include "nodes/pg_list.h"
#include "nodes/primnodes.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#else
#include "nodes/relation.h"
#endif
#include "utils/builtins.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"

@@ -28,14 +28,8 @@
#include "nodes/nodeFuncs.h"
#include "nodes/pg_list.h"
#include "nodes/primnodes.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#include "optimizer/optimizer.h"
#else
#include "optimizer/cost.h"
#include "nodes/relation.h"
#include "optimizer/var.h"
#endif
#include "nodes/makefuncs.h"
#include "optimizer/paths.h"
#include "parser/parsetree.h"

@@ -188,16 +188,12 @@ typedef struct PendingPruningInstance
	PruningTreeNode *continueAt;
} PendingPruningInstance;

#if PG_VERSION_NUM >= PG_VERSION_12
typedef union \
{ \
	FunctionCallInfoBaseData fcinfo; \
	/* ensure enough space for nargs args is available */ \
	char fcinfo_data[SizeForFunctionCallInfo(2)]; \
} FunctionCall2InfoData;
#else
typedef FunctionCallInfoData FunctionCall2InfoData;
#endif

/*
 * We also ignore this warning in ./configure, but that's not always enough.

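As background on the PG 12 fmgr API this union targets: PG 12 turned FunctionCallInfo into a variable-length struct, so callers must reserve space for the argument array up front. A minimal sketch of the standard PG 12 calling pattern (the helper name is hypothetical; LOCAL_FCINFO, InitFunctionCallInfoData, and FunctionCallInvoke are the stock fmgr.h facilities):

	#include "postgres.h"
	#include "fmgr.h"

	/* Invoke a two-argument function through a stack-allocated
	 * FunctionCallInfo; flinfo is assumed to have been filled in
	 * earlier with fmgr_info(). */
	static Datum
	CallTwoArgFunction(FmgrInfo *flinfo, Datum left, Datum right, Oid collation)
	{
		LOCAL_FCINFO(fcinfo, 2);    /* expands to the union pattern above */

		InitFunctionCallInfoData(*fcinfo, flinfo, 2, collation, NULL, NULL);

		fcinfo->args[0].value = left;
		fcinfo->args[0].isnull = false;
		fcinfo->args[1].value = right;
		fcinfo->args[1].isnull = false;

		return FunctionCallInvoke(fcinfo);
	}
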
@@ -125,7 +125,6 @@ static void CitusAuthHook(Port *port, int status);

static ClientAuthentication_hook_type original_client_auth_hook = NULL;


/* *INDENT-OFF* */
/* GUC enum definitions */
static const struct config_enum_entry propagate_set_commands_options[] = {

@@ -1076,8 +1075,9 @@ RegisterCitusConfigVariables(void)
	 */
	DefineCustomBoolVariable(
		"citus.enable_cte_inlining",
		gettext_noop("When set to false, CTE inlining feature is disabled"),
		gettext_noop("This feature is not intended for users. It is developed "
		gettext_noop("When set to false, CTE inlining feature is disabled."),
		gettext_noop(
			"This feature is not intended for users and it is deprecated. It is developed "
			"to get consistent regression test outputs between Postgres 11"
			"and Postgres 12. In Postgres 12+, the user can control the behaviour"
			"by [NOT] MATERIALIZED keyword on CTEs. However, in PG 11, we cannot do "

@@ -21,7 +21,6 @@

#include "distributed/pg_version_constants.h"

#if PG_VERSION_NUM >= PG_VERSION_12

#include "access/amapi.h"
#include "access/heapam.h"

@@ -586,6 +585,3 @@ fake_am_handler(PG_FUNCTION_ARGS)
{
	PG_RETURN_POINTER(&fake_methods);
}


#endif

@@ -25,11 +25,7 @@
#include "nodes/nodes.h"
#include "nodes/pg_list.h"
#include "nodes/plannodes.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#else
#include "nodes/relation.h"
#endif
#include "optimizer/pathnode.h"
#include "optimizer/planmain.h"
#include "optimizer/restrictinfo.h"

@@ -28,10 +28,8 @@
#include "distributed/multi_physical_planner.h"
#include "distributed/resource_lock.h"
#include "distributed/shard_pruning.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#endif
#include "nodes/nodes.h"
#include "nodes/pg_list.h"
#include "nodes/primnodes.h"

@@ -32,9 +32,7 @@
#include "distributed/tuplestore.h"
#include "nodes/execnodes.h"
#include "postmaster/autovacuum.h" /* to access autovacuum_max_workers */
#if PG_VERSION_NUM >= PG_VERSION_12
#include "replication/walsender.h"
#endif
#include "storage/ipc.h"
#include "storage/lmgr.h"
#include "storage/lwlock.h"

@@ -598,9 +596,7 @@ TotalProcCount(void)
	 */
	totalProcs = maxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;

#if PG_VERSION_NUM >= PG_VERSION_12
	totalProcs += max_wal_senders;
#endif

	return totalProcs;
}

@@ -22,9 +22,7 @@
#include <sys/stat.h>
#include <unistd.h>

#if PG_VERSION_NUM >= PG_VERSION_12
#include "access/genam.h"
#endif
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/relscan.h"

@@ -21,9 +21,7 @@
#include "nodes/nodes.h"
#include "nodes/primnodes.h"
#include "optimizer/clauses.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#endif
#include "optimizer/planmain.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"

@@ -310,9 +310,7 @@ GetRangeTblKind(RangeTblEntry *rte)
		case RTE_JOIN:
		case RTE_VALUES:
		case RTE_CTE:
#if PG_VERSION_NUM >= PG_VERSION_12
		case RTE_RESULT:
#endif
		{
			rteKind = (CitusRTEKind) rte->rtekind;
			break;

@@ -34,11 +34,7 @@
#include "distributed/metadata_utility.h"
#include "lib/stringinfo.h"
#include "nodes/plannodes.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#else
#include "nodes/relation.h"
#endif
#include "utils/datum.h"


@@ -23,17 +23,7 @@
#include "distributed/citus_safe_lib.h"
#include "lib/stringinfo.h"

/*
 * In PG 11 pg_vsnprintf is not exported and compiled in most cases, in that
 * case use the copied one from pg11_snprintf.c
 * NOTE: Whenever removing this section also remove pg11_snprintf.c
 */
#if PG_VERSION_NUM < PG_VERSION_12
extern int pg11_vsnprintf(char *str, size_t count, const char *fmt, va_list args);
#define citus_vsnprintf pg11_vsnprintf
#else
#define citus_vsnprintf pg_vsnprintf
#endif


/*

@@ -14,14 +14,10 @@

#include "distributed/pg_version_constants.h"

#if PG_VERSION_NUM >= PG_VERSION_12
#include "access/genam.h"
#endif
#include "access/htup_details.h"
#include "access/stratnum.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "access/table.h"
#endif
#include "catalog/pg_constraint.h"
#include "distributed/commands.h"
#include "distributed/foreign_key_relationship.h"

@@ -37,9 +33,6 @@
#include "common/hashfn.h"
#endif
#include "utils/memutils.h"
#if PG_VERSION_NUM < PG_VERSION_12
#include "utils/rel.h"
#endif


/*

@@ -35,9 +35,7 @@
#include "lib/stringinfo.h"
#include "nodes/pg_list.h"
#include "pgstat.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "partitioning/partdesc.h"
#endif
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"

(File diff suppressed because it is too large.)

@@ -81,7 +81,6 @@ PG_FUNCTION_INFO_V1(worker_append_table_to_shard);
 * Following UDFs are stub functions, you can check their comments for more
 * detail.
 */
PG_FUNCTION_INFO_V1(worker_fetch_query_results_file);
PG_FUNCTION_INFO_V1(worker_fetch_regular_table);
PG_FUNCTION_INFO_V1(worker_fetch_foreign_file);
PG_FUNCTION_INFO_V1(master_expire_table_cache);

@@ -814,20 +813,6 @@ SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg)
}


/*
 * worker_fetch_query_results_file is a stub UDF to allow the function object
 * to be re-created during upgrades. We should keep this around until we drop
 * support for Postgres 11, since Postgres 11 is the highest version for which
 * this object may have been created.
 */
Datum
worker_fetch_query_results_file(PG_FUNCTION_ARGS)
{
	ereport(DEBUG2, (errmsg("this function is deprecated and no longer is used")));
	PG_RETURN_VOID();
}


/*
 * worker_fetch_regular_table UDF is a stub UDF to install Citus flawlessly.
 * Otherwise we need to delete them from our sql files, which is confusing

@@ -20,10 +20,8 @@
#include "funcapi.h"
#include "miscadmin.h"

#if PG_VERSION_NUM >= PG_VERSION_12
#include "access/genam.h"
#include "access/table.h"
#endif
#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/dependency.h"

@@ -266,26 +264,16 @@ worker_merge_files_and_run_query(PG_FUNCTION_ARGS)
Datum
worker_cleanup_job_schema_cache(PG_FUNCTION_ARGS)
{
	Relation pgNamespace = NULL;
#if PG_VERSION_NUM >= PG_VERSION_12
	TableScanDesc scanDescriptor = NULL;
#else
	HeapScanDesc scanDescriptor = NULL;
#endif
	ScanKey scanKey = NULL;
	int scanKeyCount = 0;
	HeapTuple heapTuple = NULL;

	CheckCitusVersion(ERROR);

	pgNamespace = table_open(NamespaceRelationId, AccessExclusiveLock);
#if PG_VERSION_NUM >= PG_VERSION_12
	scanDescriptor = table_beginscan_catalog(pgNamespace, scanKeyCount, scanKey);
#else
	scanDescriptor = heap_beginscan_catalog(pgNamespace, scanKeyCount, scanKey);
#endif
	Relation pgNamespace = table_open(NamespaceRelationId, AccessExclusiveLock);
	TableScanDesc scanDescriptor = table_beginscan_catalog(pgNamespace, scanKeyCount,
														   scanKey);

	heapTuple = heap_getnext(scanDescriptor, ForwardScanDirection);
	HeapTuple heapTuple = heap_getnext(scanDescriptor, ForwardScanDirection);
	while (HeapTupleIsValid(heapTuple))
	{
		Form_pg_namespace schemaForm = (Form_pg_namespace) GETSTRUCT(heapTuple);

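The conversion above settles on the PG 12 table-access API for catalog scans. Roughly, the pattern looks like the sketch below (the function name is hypothetical; table_open, table_beginscan_catalog, and heap_getnext are the stock PG 12 calls that replace the pre-PG 12 heap_open/heap_beginscan_catalog pair):

	#include "postgres.h"
	#include "access/htup_details.h"
	#include "access/table.h"
	#include "access/tableam.h"
	#include "access/heapam.h"
	#include "catalog/pg_namespace.h"
	#include "storage/lockdefs.h"

	/* Sequentially scan pg_namespace using the PG 12 table AM API. */
	static void
	ScanNamespaces(void)
	{
		Relation rel = table_open(NamespaceRelationId, AccessShareLock);
		TableScanDesc scan = table_beginscan_catalog(rel, 0, NULL);

		HeapTuple tuple = heap_getnext(scan, ForwardScanDirection);
		while (HeapTupleIsValid(tuple))
		{
			Form_pg_namespace form = (Form_pg_namespace) GETSTRUCT(tuple);

			elog(DEBUG2, "schema: %s", NameStr(form->nspname));
			tuple = heap_getnext(scan, ForwardScanDirection);
		}

		table_endscan(scan);
		table_close(rel, AccessShareLock);
	}
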
@@ -17,17 +17,6 @@
#define ExplainPropertyLong(qlabel, value, es) \
	ExplainPropertyInteger(qlabel, NULL, value, es)

#if PG_VERSION_NUM < 120000
#define TTS_EMPTY(slot) ((slot)->tts_isempty)
#define ExecForceStoreHeapTuple(tuple, slot, shouldFree) \
	ExecStoreTuple(newTuple, tupleSlot, InvalidBuffer, shouldFree);
#define table_open(r, l) heap_open(r, l)
#define table_close(r, l) heap_close(r, l)
#define TableScanDesc HeapScanDesc
#define table_beginscan heap_beginscan
#define table_endscan heap_endscan
#endif

#if PG_VERSION_NUM < 130000
#define detoast_attr(X) heap_tuple_untoast_attr(X)
#endif

@@ -16,11 +16,7 @@
#include "nodes/parsenodes.h"
#include "nodes/plannodes.h"

#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#else
#include "nodes/relation.h"
#endif


/* Function declarations for building local plans on the coordinator node */

@@ -408,7 +408,6 @@ extern List * PreprocessCreateEnumStmt(Node *stmt, const char *queryString,
extern List * PostprocessCreateEnumStmt(Node *stmt, const char *queryString);
extern List * PreprocessAlterEnumStmt(Node *stmt, const char *queryString,
									  ProcessUtilityContext processUtilityContext);
extern List * PostprocessAlterEnumStmt(Node *stmt, const char *queryString);
extern List * PreprocessDropTypeStmt(Node *stmt, const char *queryString,
									 ProcessUtilityContext processUtilityContext);
extern List * PreprocessRenameTypeStmt(Node *stmt, const char *queryString,

@@ -16,11 +16,7 @@

#include "nodes/plannodes.h"

#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#else
#include "nodes/relation.h"
#endif

#include "distributed/citus_nodes.h"
#include "distributed/errormessage.h"

@@ -15,26 +15,6 @@

#include "utils/hsearch.h"

/* pg12 includes this exact implementation of hash_combine */
#if PG_VERSION_NUM < PG_VERSION_12

/*
 * Combine two hash values, resulting in another hash value, with decent bit
 * mixing.
 *
 * Similar to boost's hash_combine().
 */
static inline uint32
hash_combine(uint32 a, uint32 b)
{
	a ^= b + 0x9e3779b9 + (a << 6) + (a >> 2);
	return a;
}


#endif


extern void hash_delete_all(HTAB *htab);

/*

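With the local copy gone, callers pick up hash_combine() from PostgreSQL itself (utils/hashutils.h on PG 12, common/hashfn.h on PG 13+, matching the #include seen earlier in this diff). A minimal usage sketch, with a hypothetical helper name:

	#include "postgres.h"
	#if PG_VERSION_NUM >= 130000
	#include "common/hashfn.h"
	#else
	#include "utils/hashutils.h"
	#endif

	/* Combine the hashes of two key fields into one bucket hash. */
	static uint32
	HashTwoFields(uint32 fieldA, uint32 fieldB)
	{
		return hash_combine(hash_uint32(fieldA), hash_uint32(fieldB));
	}
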
@@ -58,9 +58,7 @@ typedef enum CitusRTEKind
	CITUS_RTE_VALUES = RTE_VALUES, /* VALUES (<exprlist>), (<exprlist>), ... */
	CITUS_RTE_CTE = RTE_CTE, /* common table expr (WITH list element) */
	CITUS_RTE_NAMEDTUPLESTORE = RTE_NAMEDTUPLESTORE, /* tuplestore, e.g. for triggers */
#if (PG_VERSION_NUM >= PG_VERSION_12)
	CITUS_RTE_RESULT = RTE_RESULT, /* RTE represents an empty FROM clause */
#endif
	CITUS_RTE_SHARD,
	CITUS_RTE_REMOTE_QUERY
} CitusRTEKind;

@@ -11,7 +11,6 @@
#ifndef PG_VERSION_CONSTANTS
#define PG_VERSION_CONSTANTS

#define PG_VERSION_11 110000
#define PG_VERSION_12 120000
#define PG_VERSION_13 130000
#define PG_VERSION_14 140000

@@ -16,11 +16,7 @@
#include "distributed/relation_restriction_equivalence.h"
#include "nodes/pg_list.h"
#include "nodes/primnodes.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "nodes/pathnodes.h"
#else
#include "nodes/relation.h"
#endif

typedef struct RecursivePlanningContextInternal RecursivePlanningContext;

@@ -24,9 +24,7 @@
#include "executor/tuptable.h"
#include "nodes/parsenodes.h"
#include "parser/parse_func.h"
#if (PG_VERSION_NUM >= PG_VERSION_12)
#include "optimizer/optimizer.h"
#endif

#if (PG_VERSION_NUM >= PG_VERSION_13)
#include "tcop/tcopprot.h"

@@ -125,87 +123,6 @@ FileCompatFromFileStart(File fileDesc)
}


#else /* pre PG12 */
#define CreateTableSlotForRel(rel) MakeSingleTupleTableSlot(RelationGetDescr(rel))
#define table_open(r, l) heap_open(r, l)
#define table_openrv(r, l) heap_openrv(r, l)
#define table_openrv_extended(r, l, m) heap_openrv_extended(r, l, m)
#define table_close(r, l) heap_close(r, l)
#define QTW_EXAMINE_RTES_BEFORE QTW_EXAMINE_RTES
#define MakeSingleTupleTableSlotCompat(tupleDesc, tts_opts) \
	MakeSingleTupleTableSlot(tupleDesc)
#define NextCopyFromCompat(cstate, econtext, values, nulls) \
	NextCopyFrom(cstate, econtext, values, nulls, NULL)

/*
 * In PG12 GetSysCacheOid requires an oid column,
 * whereas beforehand the oid column was implicit with WITH OIDS
 */
#define GetSysCacheOid1Compat(cacheId, oidcol, key1) \
	GetSysCacheOid1(cacheId, key1)
#define GetSysCacheOid2Compat(cacheId, oidcol, key1, key2) \
	GetSysCacheOid2(cacheId, key1, key2)
#define GetSysCacheOid3Compat(cacheId, oidcol, key1, key2, key3) \
	GetSysCacheOid3(cacheId, key1, key2, key3)
#define GetSysCacheOid4Compat(cacheId, oidcol, key1, key2, key3, key4) \
	GetSysCacheOid4(cacheId, key1, key2, key3, key4)

#define LOCAL_FCINFO(name, nargs) \
	FunctionCallInfoData name ## data; \
	FunctionCallInfoData *name = &name ## data

#define fcGetArgValue(fc, n) ((fc)->arg[n])
#define fcGetArgNull(fc, n) ((fc)->argnull[n])
#define fcSetArgExt(fc, n, val, is_null) \
	(((fc)->argnull[n] = (is_null)), ((fc)->arg[n] = (val)))

typedef struct
{
	File fd;
} FileCompat;

static inline int
FileWriteCompat(FileCompat *file, char *buffer, int amount, uint32 wait_event_info)
{
	return FileWrite(file->fd, buffer, amount, wait_event_info);
}


static inline int
FileReadCompat(FileCompat *file, char *buffer, int amount, uint32 wait_event_info)
{
	return FileRead(file->fd, buffer, amount, wait_event_info);
}


static inline FileCompat
FileCompatFromFileStart(File fileDesc)
{
	FileCompat fc = {
		.fd = fileDesc,
	};

	return fc;
}


/*
 * postgres 11 equivalent for a function with the same name in postgres 12+.
 */
static inline bool
table_scan_getnextslot(HeapScanDesc scan, ScanDirection dir, TupleTableSlot *slot)
{
	HeapTuple tuple = heap_getnext(scan, ForwardScanDirection);
	if (tuple == NULL)
	{
		return false;
	}

	ExecStoreTuple(tuple, slot, InvalidBuffer, false);
	return true;
}


#endif /* PG12 */

#define fcSetArg(fc, n, value) fcSetArgExt(fc, n, value, false)

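For orientation on what the removed table_scan_getnextslot() shim emulated: PG 12 provides a native slot-based scan loop through the table AM layer. A minimal sketch under assumed standard PG 12 APIs (the function name is hypothetical, and snapshot handling is simplified to GetActiveSnapshot for illustration):

	#include "postgres.h"
	#include "access/table.h"
	#include "access/tableam.h"
	#include "executor/tuptable.h"
	#include "storage/lockdefs.h"
	#include "utils/snapmgr.h"

	/* PG 12's native slot-based sequential scan, which the removed
	 * pre-PG 12 compat function approximated with heap_getnext()
	 * plus ExecStoreTuple(). */
	static void
	ScanRelation(Oid relationId)
	{
		Relation rel = table_open(relationId, AccessShareLock);
		TableScanDesc scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);
		TupleTableSlot *slot = table_slot_create(rel, NULL);

		while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
		{
			/* ... process the tuple held by the slot ... */
		}

		ExecDropSingleTupleTableSlot(slot);
		table_endscan(scan);
		table_close(rel, AccessShareLock);
	}
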
@@ -144,7 +144,6 @@ extern Node * ParseTreeRawStmt(const char *ddlCommand);

/* Function declarations for applying distributed execution primitives */
extern Datum worker_fetch_partition_file(PG_FUNCTION_ARGS);
extern Datum worker_fetch_query_results_file(PG_FUNCTION_ARGS);
extern Datum worker_apply_shard_ddl_command(PG_FUNCTION_ARGS);
extern Datum worker_range_partition_table(PG_FUNCTION_ARGS);
extern Datum worker_hash_partition_table(PG_FUNCTION_ARGS);

@@ -43,12 +43,6 @@ s/"citus_local_table_([0-9]+)_[0-9]+"/"citus_local_table_\1_xxxxxxx"/g
# normalize relation oid suffix for the truncate triggers created by citus
s/truncate_trigger_[0-9]+/truncate_trigger_xxxxxxx/g

# (citus_table_triggers.sql)
# postgres generates create trigger commands for triggers with:
# "EXECUTE FUNCTION" in pg12
# "EXECUTE PROCEDURE" in pg11
s/FOR EACH (ROW|STATEMENT)(.*)EXECUTE PROCEDURE/FOR EACH \1\2EXECUTE FUNCTION/g

# In foreign_key_restriction_enforcement, normalize shard names
s/"(on_update_fkey_table_|fkey_)[0-9]+"/"\1xxxxxxx"/g

@@ -116,9 +110,6 @@ s/partition ".*" would be violated by some row/partition would be violated by so
/.*Peak Memory Usage:.*$/d
s/of relation ".*" contains null values/contains null values/g
s/of relation "t1" is violated by some row/is violated by some row/g
# can be removed when we remove PG_VERSION_NUM >= 120000
s/(.*)Output:.*$/\1Output: xxxxxx/g


# intermediate_results
s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g

@@ -145,10 +136,6 @@ s/repartitioned_results_[0-9]+/repartitioned_results_xxxxx/g
# ignore job id in worker_hash_partition_table
s/worker_hash_partition_table \([0-9]+/worker_hash_partition_table \(xxxxxxx/g

# ignore first parameter for citus_extradata_container due to differences between pg11 and pg12
# can be removed when we remove PG_VERSION_NUM >= 120000
s/pg_catalog.citus_extradata_container\([0-9]+/pg_catalog.citus_extradata_container\(XXX/g

# ignore reference table replication messages
/replicating reference table.*$/d

@ -1,900 +0,0 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven;
|
||||
server_version_above_eleven
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
\gset
|
||||
CREATE SCHEMA alter_distributed_table;
|
||||
SET search_path TO alter_distributed_table;
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE dist_table (a INT, b INT);
|
||||
SELECT create_distributed_table ('dist_table', 'a', colocate_with := 'none');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO dist_table VALUES (1, 1), (2, 2), (3, 3);
|
||||
CREATE TABLE colocation_table (a INT, b INT);
|
||||
SELECT create_distributed_table ('colocation_table', 'a', colocate_with := 'none');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE colocation_table_2 (a INT, b INT);
|
||||
SELECT create_distributed_table ('colocation_table_2', 'a', colocate_with := 'none');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||
WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2');
|
||||
table_name | citus_table_type | distribution_column | shard_count
|
||||
---------------------------------------------------------------------
|
||||
colocation_table | distributed | a | 4
|
||||
colocation_table_2 | distributed | a | 4
|
||||
dist_table | distributed | a | 4
|
||||
(3 rows)
|
||||
|
||||
SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables
|
||||
WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2') GROUP BY colocation_id ORDER BY 1;
|
||||
Colocation Groups
|
||||
---------------------------------------------------------------------
|
||||
colocation_table
|
||||
colocation_table_2
|
||||
dist_table
|
||||
(3 rows)
|
||||
|
||||
-- test altering distribution column
|
||||
SELECT alter_distributed_table('dist_table', distribution_column := 'b');
|
||||
NOTICE: creating a new table for alter_distributed_table.dist_table
|
||||
NOTICE: moving the data of alter_distributed_table.dist_table
|
||||
NOTICE: dropping the old alter_distributed_table.dist_table
|
||||
NOTICE: renaming the new table to alter_distributed_table.dist_table
|
||||
alter_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||
WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2');
|
||||
table_name | citus_table_type | distribution_column | shard_count
|
||||
---------------------------------------------------------------------
|
||||
colocation_table | distributed | a | 4
|
||||
colocation_table_2 | distributed | a | 4
|
||||
dist_table | distributed | b | 4
|
||||
(3 rows)
|
||||
|
||||
SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables
|
||||
WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2') GROUP BY colocation_id ORDER BY 1;
|
||||
Colocation Groups
|
||||
---------------------------------------------------------------------
|
||||
colocation_table
|
||||
colocation_table_2
|
||||
dist_table
|
||||
(3 rows)
|
||||
|
||||
-- test altering shard count
|
||||
SELECT alter_distributed_table('dist_table', shard_count := 6);
|
||||
NOTICE: creating a new table for alter_distributed_table.dist_table
|
||||
NOTICE: moving the data of alter_distributed_table.dist_table
|
||||
NOTICE: dropping the old alter_distributed_table.dist_table
|
||||
NOTICE: renaming the new table to alter_distributed_table.dist_table
|
||||
alter_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
|
||||
WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2');
|
||||
table_name | citus_table_type | distribution_column | shard_count
|
||||
---------------------------------------------------------------------
|
||||
colocation_table | distributed | a | 4
|
||||
colocation_table_2 | distributed | a | 4
|
||||
dist_table | distributed | b | 6
|
||||
(3 rows)
|
||||
|
||||
SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables
|
||||
WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2') GROUP BY colocation_id ORDER BY 1;
|
||||
Colocation Groups
|
||||
---------------------------------------------------------------------
|
||||
colocation_table
|
||||
colocation_table_2
|
||||
dist_table
|
||||
(3 rows)
|
||||
|
||||
-- test altering colocation, note that shard count will also change
|
||||
SELECT alter_distributed_table('dist_table', colocate_with := 'alter_distributed_table.colocation_table');
|
||||
NOTICE: creating a new table for alter_distributed_table.dist_table
|
||||
NOTICE: moving the data of alter_distributed_table.dist_table
|
||||
NOTICE: dropping the old alter_distributed_table.dist_table
|
||||
NOTICE: renaming the new table to alter_distributed_table.dist_table
|
||||
alter_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
  WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2');
     table_name     | citus_table_type | distribution_column | shard_count
---------------------------------------------------------------------
 colocation_table   | distributed      | a                   |           4
 colocation_table_2 | distributed      | a                   |           4
 dist_table         | distributed      | b                   |           4
(3 rows)

SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables
  WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2') GROUP BY colocation_id ORDER BY 1;
      Colocation Groups
---------------------------------------------------------------------
 colocation_table, dist_table
 colocation_table_2
(2 rows)

-- test altering shard count with cascading, note that the colocation will be kept
SELECT alter_distributed_table('dist_table', shard_count := 8, cascade_to_colocated := true);
NOTICE: creating a new table for alter_distributed_table.dist_table
NOTICE: moving the data of alter_distributed_table.dist_table
NOTICE: dropping the old alter_distributed_table.dist_table
NOTICE: renaming the new table to alter_distributed_table.dist_table
NOTICE: creating a new table for alter_distributed_table.colocation_table
NOTICE: moving the data of alter_distributed_table.colocation_table
NOTICE: dropping the old alter_distributed_table.colocation_table
NOTICE: renaming the new table to alter_distributed_table.colocation_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
  WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2');
     table_name     | citus_table_type | distribution_column | shard_count
---------------------------------------------------------------------
 colocation_table   | distributed      | a                   |           8
 colocation_table_2 | distributed      | a                   |           4
 dist_table         | distributed      | b                   |           8
(3 rows)

SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables
  WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2') GROUP BY colocation_id ORDER BY 1;
      Colocation Groups
---------------------------------------------------------------------
 colocation_table, dist_table
 colocation_table_2
(2 rows)

-- test altering shard count without cascading, note that the colocation will be broken
SELECT alter_distributed_table('dist_table', shard_count := 10, cascade_to_colocated := false);
NOTICE: creating a new table for alter_distributed_table.dist_table
NOTICE: moving the data of alter_distributed_table.dist_table
NOTICE: dropping the old alter_distributed_table.dist_table
NOTICE: renaming the new table to alter_distributed_table.dist_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables
  WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2');
     table_name     | citus_table_type | distribution_column | shard_count
---------------------------------------------------------------------
 colocation_table   | distributed      | a                   |           8
 colocation_table_2 | distributed      | a                   |           4
 dist_table         | distributed      | b                   |          10
(3 rows)

SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables
  WHERE table_name IN ('dist_table', 'colocation_table', 'colocation_table_2') GROUP BY colocation_id ORDER BY 1;
      Colocation Groups
---------------------------------------------------------------------
 colocation_table
 colocation_table_2
 dist_table
(3 rows)

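-- A minimal illustrative sketch, not part of the captured output: the same
-- colocation grouping can be inspected directly through the colocationid
-- column of the pg_dist_partition catalog (table names assumed from this
-- test schema).
SELECT logicalrelid::regclass::text, colocationid
  FROM pg_dist_partition
  WHERE logicalrelid::regclass::text IN ('dist_table', 'colocation_table', 'colocation_table_2')
  ORDER BY colocationid, 1;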
-- test partitions
CREATE TABLE partitioned_table (id INT, a INT) PARTITION BY RANGE (id);
SELECT create_distributed_table('partitioned_table', 'id', colocate_with := 'none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE partitioned_table_1_5 PARTITION OF partitioned_table FOR VALUES FROM (1) TO (5);
CREATE TABLE partitioned_table_6_10 PARTITION OF partitioned_table FOR VALUES FROM (6) TO (10);
INSERT INTO partitioned_table VALUES (2, 12), (7, 2);
SELECT logicalrelid::text FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE 'partitioned\_table%' ORDER BY 1;
      logicalrelid
---------------------------------------------------------------------
 partitioned_table
 partitioned_table_1_5
 partitioned_table_6_10
(3 rows)

SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_catalog.pg_class WHERE relname LIKE 'partitioned\_table%'$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,6)
 (localhost,57638,t,6)
(2 rows)

SELECT inhrelid::regclass::text FROM pg_catalog.pg_inherits WHERE inhparent = 'partitioned_table'::regclass ORDER BY 1;
        inhrelid
---------------------------------------------------------------------
 partitioned_table_1_5
 partitioned_table_6_10
(2 rows)

SELECT table_name::text, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'partitioned\_table%' ORDER BY 1;
       table_name       | distribution_column | shard_count
---------------------------------------------------------------------
 partitioned_table      | id                  |           4
 partitioned_table_1_5  | id                  |           4
 partitioned_table_6_10 | id                  |           4
(3 rows)

SELECT * FROM partitioned_table ORDER BY 1, 2;
 id | a
---------------------------------------------------------------------
  2 | 12
  7 |  2
(2 rows)

SELECT * FROM partitioned_table_1_5 ORDER BY 1, 2;
 id | a
---------------------------------------------------------------------
  2 | 12
(1 row)

SELECT * FROM partitioned_table_6_10 ORDER BY 1, 2;
 id | a
---------------------------------------------------------------------
  7 | 2
(1 row)

-- test altering the parent table
SELECT alter_distributed_table('partitioned_table', shard_count := 10, distribution_column := 'a');
NOTICE: converting the partitions of alter_distributed_table.partitioned_table
NOTICE: creating a new table for alter_distributed_table.partitioned_table_1_5
NOTICE: moving the data of alter_distributed_table.partitioned_table_1_5
NOTICE: dropping the old alter_distributed_table.partitioned_table_1_5
NOTICE: renaming the new table to alter_distributed_table.partitioned_table_1_5
NOTICE: creating a new table for alter_distributed_table.partitioned_table_6_10
NOTICE: moving the data of alter_distributed_table.partitioned_table_6_10
NOTICE: dropping the old alter_distributed_table.partitioned_table_6_10
NOTICE: renaming the new table to alter_distributed_table.partitioned_table_6_10
NOTICE: creating a new table for alter_distributed_table.partitioned_table
NOTICE: dropping the old alter_distributed_table.partitioned_table
NOTICE: renaming the new table to alter_distributed_table.partitioned_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

-- test altering the partition
SELECT alter_distributed_table('partitioned_table_1_5', shard_count := 10, distribution_column := 'a');
ERROR: cannot complete operation because table is a partition
HINT: the parent table is "partitioned_table"
SELECT logicalrelid::text FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE 'partitioned\_table%' ORDER BY 1;
      logicalrelid
---------------------------------------------------------------------
 partitioned_table
 partitioned_table_1_5
 partitioned_table_6_10
(3 rows)

SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_catalog.pg_class WHERE relname LIKE 'partitioned\_table%'$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,15)
 (localhost,57638,t,15)
(2 rows)

SELECT inhrelid::regclass::text FROM pg_catalog.pg_inherits WHERE inhparent = 'partitioned_table'::regclass ORDER BY 1;
        inhrelid
---------------------------------------------------------------------
 partitioned_table_1_5
 partitioned_table_6_10
(2 rows)

SELECT table_name::text, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'partitioned\_table%' ORDER BY 1;
       table_name       | distribution_column | shard_count
---------------------------------------------------------------------
 partitioned_table      | a                   |          10
 partitioned_table_1_5  | a                   |          10
 partitioned_table_6_10 | a                   |          10
(3 rows)

SELECT * FROM partitioned_table ORDER BY 1, 2;
 id | a
---------------------------------------------------------------------
  2 | 12
  7 |  2
(2 rows)

SELECT * FROM partitioned_table_1_5 ORDER BY 1, 2;
 id | a
---------------------------------------------------------------------
  2 | 12
(1 row)

SELECT * FROM partitioned_table_6_10 ORDER BY 1, 2;
 id | a
---------------------------------------------------------------------
  7 | 2
(1 row)

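-- Illustrative sketch, not captured output: a partition attached to an
-- already-distributed parent is distributed automatically and inherits the
-- parent's distribution column and shard count (the partition name below is
-- hypothetical).
CREATE TABLE partitioned_table_11_15 PARTITION OF partitioned_table FOR VALUES FROM (11) TO (15);
SELECT table_name::text, distribution_column, shard_count FROM public.citus_tables
  WHERE table_name::text = 'partitioned_table_11_15';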
-- test references
CREATE TABLE referenced_dist_table (a INT UNIQUE);
CREATE TABLE referenced_ref_table (a INT UNIQUE);
CREATE TABLE table_with_references (a1 INT UNIQUE REFERENCES referenced_dist_table(a), a2 INT REFERENCES referenced_ref_table(a));
CREATE TABLE referencing_dist_table (a INT REFERENCES table_with_references(a1));
SELECT create_distributed_table('referenced_dist_table', 'a', colocate_with:='none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_reference_table('referenced_ref_table');
 create_reference_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('table_with_references', 'a1', colocate_with:='referenced_dist_table');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('referencing_dist_table', 'a', colocate_with:='referenced_dist_table');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SET client_min_messages TO WARNING;
SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
  WHERE (conrelid::regclass::text = 'table_with_references' OR confrelid::regclass::text = 'table_with_references') AND contype = 'f' ORDER BY 1;
   Referencing Table    |                      Definition
---------------------------------------------------------------------
 referencing_dist_table | FOREIGN KEY (a) REFERENCES table_with_references(a1)
 table_with_references  | FOREIGN KEY (a1) REFERENCES referenced_dist_table(a)
 table_with_references  | FOREIGN KEY (a2) REFERENCES referenced_ref_table(a)
(3 rows)

SELECT alter_distributed_table('table_with_references', shard_count := 12, cascade_to_colocated := true);
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
  WHERE (conrelid::regclass::text = 'table_with_references' OR confrelid::regclass::text = 'table_with_references') AND contype = 'f' ORDER BY 1;
   Referencing Table    |                      Definition
---------------------------------------------------------------------
 referencing_dist_table | FOREIGN KEY (a) REFERENCES table_with_references(a1)
 table_with_references  | FOREIGN KEY (a2) REFERENCES referenced_ref_table(a)
 table_with_references  | FOREIGN KEY (a1) REFERENCES referenced_dist_table(a)
(3 rows)

SELECT alter_distributed_table('table_with_references', shard_count := 10, cascade_to_colocated := false);
WARNING: foreign key table_with_references_a1_fkey will be dropped
WARNING: foreign key referencing_dist_table_a_fkey will be dropped
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
  WHERE (conrelid::regclass::text = 'table_with_references' OR confrelid::regclass::text = 'table_with_references') AND contype = 'f' ORDER BY 1;
   Referencing Table   |                     Definition
---------------------------------------------------------------------
 table_with_references | FOREIGN KEY (a2) REFERENCES referenced_ref_table(a)
(1 row)

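-- Illustrative sketch, not captured output: a foreign key dropped by a
-- non-cascaded alter can be re-created manually once the two tables are
-- colocated again (the constraint name below is the one reported in the
-- WARNING above and is otherwise hypothetical).
SELECT alter_distributed_table('table_with_references', colocate_with := 'referenced_dist_table');
ALTER TABLE table_with_references
    ADD CONSTRAINT table_with_references_a1_fkey FOREIGN KEY (a1) REFERENCES referenced_dist_table(a);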
-- check when multi shard modify mode is set to sequential
SELECT alter_distributed_table('referenced_dist_table', colocate_with:='none');
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE ref_to_dist_table(a INT REFERENCES referenced_dist_table(a));
CREATE TABLE ref_to_ref_table(a INT REFERENCES referenced_ref_table(a));
SELECT create_distributed_table('ref_to_dist_table', 'a', colocate_with:='referenced_dist_table');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('ref_to_ref_table', 'a', colocate_with:='none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- alter a table referencing a reference table
SELECT alter_distributed_table('ref_to_ref_table', shard_count:=6);
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

-- let's create a table that is not colocated with a table that references a reference table
CREATE TABLE col_with_ref_to_dist (a INT);
SELECT create_distributed_table('col_with_ref_to_dist', 'a', colocate_with:='ref_to_dist_table');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- and create a table colocated with a table that references a reference table
CREATE TABLE col_with_ref_to_ref (a INT);
SELECT alter_distributed_table('ref_to_ref_table', colocate_with:='none');
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('col_with_ref_to_ref', 'a', colocate_with:='ref_to_ref_table');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- alter a table colocated with a table referencing a reference table with cascading
SELECT alter_distributed_table('col_with_ref_to_ref', shard_count:=8, cascade_to_colocated:=true);
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

-- alter a table colocated with a table referencing a reference table without cascading
SELECT alter_distributed_table('col_with_ref_to_ref', shard_count:=10, cascade_to_colocated:=false);
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

-- alter a table not colocated with a table referencing a reference table with cascading
SELECT alter_distributed_table('col_with_ref_to_dist', shard_count:=6, cascade_to_colocated:=true);
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

\if :server_version_above_eleven
-- test altering columnar table
CREATE TABLE columnar_table (a INT) USING columnar;
SELECT create_distributed_table('columnar_table', 'a', colocate_with:='none');
SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHERE table_name::text = 'columnar_table';
SELECT alter_distributed_table('columnar_table', shard_count:=6);
SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHERE table_name::text = 'columnar_table';
\endif
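-- Illustrative sketch, not captured output: on PG12+ the access method
-- itself can be swapped with the dedicated alter_table_set_access_method()
-- UDF (the same UDF exercised by the version-gated test removed later in
-- this commit) rather than recreating the table via alter_distributed_table().
SELECT alter_table_set_access_method('columnar_table', 'heap');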
-- test with metadata sync
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

CREATE TABLE metadata_sync_table (a BIGSERIAL);
SELECT create_distributed_table('metadata_sync_table', 'a', colocate_with:='none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('metadata_sync_table', shard_count:=6);
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('metadata_sync_table', shard_count:=8);
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT table_name, shard_count FROM public.citus_tables WHERE table_name::text = 'metadata_sync_table';
     table_name      | shard_count
---------------------------------------------------------------------
 metadata_sync_table |           8
(1 row)

SET citus.replication_model TO DEFAULT;
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

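-- Illustrative sketch, not captured output: which workers currently hold
-- synced metadata is recorded in the hasmetadata column of pg_dist_node.
SELECT nodename, nodeport, hasmetadata FROM pg_dist_node ORDER BY nodeport;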
-- test complex cascade operations
CREATE TABLE cas_1 (a INT UNIQUE);
CREATE TABLE cas_2 (a INT UNIQUE);
CREATE TABLE cas_3 (a INT UNIQUE);
CREATE TABLE cas_4 (a INT UNIQUE);
CREATE TABLE cas_par (a INT UNIQUE) PARTITION BY RANGE(a);
CREATE TABLE cas_par_1 PARTITION OF cas_par FOR VALUES FROM (1) TO (4);
CREATE TABLE cas_par_2 PARTITION OF cas_par FOR VALUES FROM (5) TO (8);
CREATE TABLE cas_col (a INT UNIQUE);
-- add foreign keys from and to partitions
ALTER TABLE cas_par_1 ADD CONSTRAINT fkey_from_par_1 FOREIGN KEY (a) REFERENCES cas_1(a);
ALTER TABLE cas_2 ADD CONSTRAINT fkey_to_par_1 FOREIGN KEY (a) REFERENCES cas_par_1(a);
ALTER TABLE cas_par ADD CONSTRAINT fkey_from_par FOREIGN KEY (a) REFERENCES cas_3(a);
ALTER TABLE cas_4 ADD CONSTRAINT fkey_to_par FOREIGN KEY (a) REFERENCES cas_par(a);
ERROR: cannot reference partitioned table "cas_par"
-- distribute all the tables
SELECT create_distributed_table('cas_1', 'a', colocate_with:='none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('cas_3', 'a', colocate_with:='cas_1');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('cas_par', 'a', colocate_with:='cas_1');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('cas_2', 'a', colocate_with:='cas_1');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('cas_4', 'a', colocate_with:='cas_1');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('cas_col', 'a', colocate_with:='cas_1');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
  WHERE (conrelid::regclass::text = 'cas_par_1' OR confrelid::regclass::text = 'cas_par_1') ORDER BY 1, 2;
 Referencing Table |               Definition
---------------------------------------------------------------------
 cas_2             | FOREIGN KEY (a) REFERENCES cas_par_1(a)
 cas_par_1         | FOREIGN KEY (a) REFERENCES cas_1(a)
 cas_par_1         | FOREIGN KEY (a) REFERENCES cas_3(a)
 cas_par_1         | UNIQUE (a)
(4 rows)

SELECT inhrelid::regclass::text FROM pg_catalog.pg_inherits WHERE inhparent = 'cas_par'::regclass ORDER BY 1;
 inhrelid
---------------------------------------------------------------------
 cas_par_1
 cas_par_2
(2 rows)

-- alter the cas_col and cascade the change
SELECT alter_distributed_table('cas_col', shard_count:=6, cascade_to_colocated:=true);
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
  WHERE (conrelid::regclass::text = 'cas_par_1' OR confrelid::regclass::text = 'cas_par_1') ORDER BY 1, 2;
 Referencing Table |               Definition
---------------------------------------------------------------------
 cas_2             | FOREIGN KEY (a) REFERENCES cas_par_1(a)
 cas_par_1         | FOREIGN KEY (a) REFERENCES cas_1(a)
 cas_par_1         | FOREIGN KEY (a) REFERENCES cas_3(a)
 cas_par_1         | UNIQUE (a)
(4 rows)

SELECT inhrelid::regclass::text FROM pg_catalog.pg_inherits WHERE inhparent = 'cas_par'::regclass ORDER BY 1;
 inhrelid
---------------------------------------------------------------------
 cas_par_1
 cas_par_2
(2 rows)

SET client_min_messages TO DEFAULT;
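-- Illustrative sketch, not captured output: after the cascaded alter above,
-- every table in the cas_* colocation group should report 6 shards.
SELECT table_name::text, shard_count FROM public.citus_tables
  WHERE table_name::text LIKE 'cas\_%' ORDER BY 1;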
-- test changing dist column and colocating partitioned table without changing shard count
CREATE TABLE col_table (a INT);
SELECT create_distributed_table('col_table', 'a', colocate_with:='none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE par_table (a BIGINT, b INT) PARTITION BY RANGE (a);
SELECT create_distributed_table('par_table', 'a', colocate_with:='none');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE par_table_1 (a BIGINT, b INT);
SELECT create_distributed_table('par_table_1', 'a', colocate_with:='par_table');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

ALTER TABLE par_table ATTACH PARTITION par_table_1 FOR VALUES FROM (1) TO (5);
SELECT alter_distributed_table('par_table', distribution_column:='b', colocate_with:='col_table');
NOTICE: converting the partitions of alter_distributed_table.par_table
NOTICE: creating a new table for alter_distributed_table.par_table_1
NOTICE: moving the data of alter_distributed_table.par_table_1
NOTICE: dropping the old alter_distributed_table.par_table_1
NOTICE: renaming the new table to alter_distributed_table.par_table_1
NOTICE: creating a new table for alter_distributed_table.par_table
NOTICE: dropping the old alter_distributed_table.par_table
NOTICE: renaming the new table to alter_distributed_table.par_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

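-- Illustrative sketch, not captured output: par_table and its partition
-- should now share col_table's colocation group, which can be verified with
-- the same grouping query used earlier in this file.
SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups"
  FROM public.citus_tables
  WHERE table_name::text IN ('col_table', 'par_table', 'par_table_1')
  GROUP BY colocation_id ORDER BY 1;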
-- test messages
-- test nothing to change
SELECT alter_distributed_table('dist_table');
ERROR: you have to specify at least one of the distribution_column, shard_count or colocate_with parameters
SELECT alter_distributed_table('dist_table', cascade_to_colocated := false);
ERROR: you have to specify at least one of the distribution_column, shard_count or colocate_with parameters
-- no operation UDF calls
SELECT alter_distributed_table('dist_table', distribution_column := 'b');
ERROR: this call doesn't change any properties of the table
HINT: check citus_tables view to see current properties of the table
SELECT alter_distributed_table('dist_table', shard_count := 10);
ERROR: this call doesn't change any properties of the table
HINT: check citus_tables view to see current properties of the table
-- first colocate the tables, then try to re-colocate
SELECT alter_distributed_table('dist_table', colocate_with := 'colocation_table');
NOTICE: creating a new table for alter_distributed_table.dist_table
NOTICE: moving the data of alter_distributed_table.dist_table
NOTICE: dropping the old alter_distributed_table.dist_table
NOTICE: renaming the new table to alter_distributed_table.dist_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('dist_table', colocate_with := 'colocation_table');
ERROR: this call doesn't change any properties of the table
HINT: check citus_tables view to see current properties of the table
-- test some changes while keeping others same
-- shouldn't error but should have notices about no-change parameters
SELECT alter_distributed_table('dist_table', distribution_column:='b', shard_count:=4, cascade_to_colocated:=false);
NOTICE: table is already distributed by b
NOTICE: creating a new table for alter_distributed_table.dist_table
NOTICE: moving the data of alter_distributed_table.dist_table
NOTICE: dropping the old alter_distributed_table.dist_table
NOTICE: renaming the new table to alter_distributed_table.dist_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('dist_table', shard_count:=4, colocate_with:='colocation_table_2');
NOTICE: shard count of the table is already 4
NOTICE: creating a new table for alter_distributed_table.dist_table
NOTICE: moving the data of alter_distributed_table.dist_table
NOTICE: dropping the old alter_distributed_table.dist_table
NOTICE: renaming the new table to alter_distributed_table.dist_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('dist_table', colocate_with:='colocation_table_2', distribution_column:='a');
NOTICE: table is already colocated with colocation_table_2
NOTICE: creating a new table for alter_distributed_table.dist_table
NOTICE: moving the data of alter_distributed_table.dist_table
NOTICE: dropping the old alter_distributed_table.dist_table
NOTICE: renaming the new table to alter_distributed_table.dist_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

-- test cascading distribution column, should error
SELECT alter_distributed_table('dist_table', distribution_column := 'b', cascade_to_colocated := true);
ERROR: distribution_column cannot be cascaded to colocated tables
SELECT alter_distributed_table('dist_table', distribution_column := 'b', shard_count:=12, colocate_with:='colocation_table_2', cascade_to_colocated := true);
ERROR: distribution_column cannot be cascaded to colocated tables
-- test nothing to cascade
SELECT alter_distributed_table('dist_table', cascade_to_colocated := true);
ERROR: shard_count or colocate_with is necessary for cascading to colocated tables
-- test cascading colocate_with := 'none'
SELECT alter_distributed_table('dist_table', colocate_with := 'none', cascade_to_colocated := true);
ERROR: colocate_with := 'none' cannot be cascaded to colocated tables
-- test changing shard count of a colocated table without cascade_to_colocated, should error
SELECT alter_distributed_table('dist_table', shard_count := 14);
ERROR: cascade_to_colocated parameter is necessary
DETAIL: this table is colocated with some other tables
HINT: cascade_to_colocated := false will break the current colocation, cascade_to_colocated := true will change the shard count of colocated tables too.
-- test changing shard count of a non-colocated table without cascade_to_colocated, shouldn't error
SELECT alter_distributed_table('dist_table', colocate_with := 'none');
NOTICE: creating a new table for alter_distributed_table.dist_table
NOTICE: moving the data of alter_distributed_table.dist_table
NOTICE: dropping the old alter_distributed_table.dist_table
NOTICE: renaming the new table to alter_distributed_table.dist_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('dist_table', shard_count := 14);
NOTICE: creating a new table for alter_distributed_table.dist_table
NOTICE: moving the data of alter_distributed_table.dist_table
NOTICE: dropping the old alter_distributed_table.dist_table
NOTICE: renaming the new table to alter_distributed_table.dist_table
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

-- test altering a table into colocating with a table but giving a different shard count
SELECT alter_distributed_table('dist_table', colocate_with := 'colocation_table', shard_count := 16);
ERROR: shard_count cannot be different than the shard count of the table in colocate_with
HINT: if no shard_count is specified shard count will be same with colocate_with table's
-- test colocation with distribution columns with different data types
CREATE TABLE different_type_table (a TEXT);
SELECT create_distributed_table('different_type_table', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('dist_table', colocate_with := 'different_type_table');
ERROR: cannot colocate with different_type_table because data type of its distribution column is different than dist_table
SELECT alter_distributed_table('dist_table', distribution_column := 'a', colocate_with := 'different_type_table');
ERROR: cannot colocate with different_type_table and change distribution column to a because data type of column a is different then the distribution column of the different_type_table
-- test shard_count := 0
SELECT alter_distributed_table('dist_table', shard_count := 0);
ERROR: shard_count cannot be 0
HINT: if you no longer want this to be a distributed table you can try undistribute_table() function
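-- Illustrative sketch, not captured output: the undistribute_table() UDF the
-- hint refers to converts a distributed table back into a regular local
-- table. (Shown against dist_table for concreteness; actually running it
-- here would break the remaining steps of this test.)
SELECT undistribute_table('dist_table');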
-- test colocating with non-distributed table
CREATE TABLE reference_table (a INT);
SELECT create_reference_table('reference_table');
 create_reference_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('dist_table', colocate_with:='reference_table');
ERROR: cannot colocate with reference_table because it is not a distributed table
-- test append table
CREATE TABLE append_table (a INT);
SELECT create_distributed_table('append_table', 'a', 'append');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('append_table', shard_count:=6);
ERROR: relation append_table should be a hash distributed table
-- test keeping dependent materialized views
CREATE TABLE mat_view_test (a int, b int);
SELECT create_distributed_table('mat_view_test', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO mat_view_test VALUES (1,1), (2,2);
CREATE MATERIALIZED VIEW mat_view AS SELECT * FROM mat_view_test;
SELECT alter_distributed_table('mat_view_test', shard_count := 5, cascade_to_colocated := false);
NOTICE: creating a new table for alter_distributed_table.mat_view_test
NOTICE: moving the data of alter_distributed_table.mat_view_test
NOTICE: dropping the old alter_distributed_table.mat_view_test
NOTICE: drop cascades to materialized view mat_view
CONTEXT: SQL statement "DROP TABLE alter_distributed_table.mat_view_test CASCADE"
NOTICE: renaming the new table to alter_distributed_table.mat_view_test
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT * FROM mat_view ORDER BY a;
 a | b
---------------------------------------------------------------------
 1 | 1
 2 | 2
(2 rows)

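-- Illustrative sketch, not captured output: the preserved materialized view
-- remains functional after the alter and can be refreshed as usual.
REFRESH MATERIALIZED VIEW mat_view;
SELECT * FROM mat_view ORDER BY a;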
-- test long table names
SET client_min_messages TO DEBUG1;
CREATE TABLE abcde_0123456789012345678901234567890123456789012345678901234567890123456789 (x int, y int);
NOTICE: identifier "abcde_0123456789012345678901234567890123456789012345678901234567890123456789" will be truncated to "abcde_012345678901234567890123456789012345678901234567890123456"
SELECT create_distributed_table('abcde_0123456789012345678901234567890123456789012345678901234567890123456789', 'x');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT alter_distributed_table('abcde_0123456789012345678901234567890123456789012345678901234567890123456789', distribution_column := 'y');
DEBUG: the name of the shard (abcde_01234567890123456789012345678901234567890_f7ff6612_xxxxxx) for relation (abcde_012345678901234567890123456789012345678901234567890123456) is too long, switching to sequential and local execution mode to prevent self deadlocks
NOTICE: creating a new table for alter_distributed_table.abcde_012345678901234567890123456789012345678901234567890123456
NOTICE: moving the data of alter_distributed_table.abcde_012345678901234567890123456789012345678901234567890123456
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
CONTEXT: SQL statement "INSERT INTO alter_distributed_table.abcde_0123456789012345678901234567890123456_f7ff6612_4160710162 (x,y) SELECT x,y FROM alter_distributed_table.abcde_012345678901234567890123456789012345678901234567890123456"
DEBUG: performing repartitioned INSERT ... SELECT
CONTEXT: SQL statement "INSERT INTO alter_distributed_table.abcde_0123456789012345678901234567890123456_f7ff6612_4160710162 (x,y) SELECT x,y FROM alter_distributed_table.abcde_012345678901234567890123456789012345678901234567890123456"
NOTICE: dropping the old alter_distributed_table.abcde_012345678901234567890123456789012345678901234567890123456
CONTEXT: SQL statement "DROP TABLE alter_distributed_table.abcde_012345678901234567890123456789012345678901234567890123456 CASCADE"
NOTICE: renaming the new table to alter_distributed_table.abcde_012345678901234567890123456789012345678901234567890123456
DEBUG: the name of the shard (abcde_01234567890123456789012345678901234567890_f7ff6612_xxxxxx) for relation (abcde_012345678901234567890123456789012345678901234567890123456) is too long, switching to sequential and local execution mode to prevent self deadlocks
CONTEXT: SQL statement "ALTER TABLE alter_distributed_table.abcde_0123456789012345678901234567890123456_f7ff6612_4160710162 RENAME TO abcde_012345678901234567890123456789012345678901234567890123456"
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

RESET client_min_messages;
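-- Illustrative sketch, not captured output: the truncated shard names created
-- on the workers stay within PostgreSQL's 63-byte identifier limit, which can
-- be spot-checked with run_command_on_workers().
SELECT run_command_on_workers($$SELECT max(length(relname)) FROM pg_class WHERE relname LIKE 'abcde\_%'$$);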
-- test long partitioned table names
CREATE TABLE partition_lengths
(
    tenant_id integer NOT NULL,
    timeperiod timestamp without time zone NOT NULL,
    inserted_utc timestamp without time zone NOT NULL DEFAULT now()
) PARTITION BY RANGE (timeperiod);
SELECT create_distributed_table('partition_lengths', 'tenant_id');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE partition_lengths_p2020_09_28_12345678901234567890123456789012345678901234567890 PARTITION OF partition_lengths FOR VALUES FROM ('2020-09-28 00:00:00') TO ('2020-09-29 00:00:00');
NOTICE: identifier "partition_lengths_p2020_09_28_12345678901234567890123456789012345678901234567890" will be truncated to "partition_lengths_p2020_09_28_123456789012345678901234567890123"
-- verify alter_distributed_table works with long partition names
SELECT alter_distributed_table('partition_lengths', shard_count := 29, cascade_to_colocated := false);
NOTICE: converting the partitions of alter_distributed_table.partition_lengths
NOTICE: creating a new table for alter_distributed_table.partition_lengths_p2020_09_28_123456789012345678901234567890123
NOTICE: moving the data of alter_distributed_table.partition_lengths_p2020_09_28_123456789012345678901234567890123
NOTICE: dropping the old alter_distributed_table.partition_lengths_p2020_09_28_123456789012345678901234567890123
NOTICE: renaming the new table to alter_distributed_table.partition_lengths_p2020_09_28_123456789012345678901234567890123
NOTICE: creating a new table for alter_distributed_table.partition_lengths
NOTICE: dropping the old alter_distributed_table.partition_lengths
NOTICE: renaming the new table to alter_distributed_table.partition_lengths
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

-- test long partition table names
ALTER TABLE partition_lengths_p2020_09_28_12345678901234567890123456789012345678901234567890 RENAME TO partition_lengths_p2020_09_28;
NOTICE: identifier "partition_lengths_p2020_09_28_12345678901234567890123456789012345678901234567890" will be truncated to "partition_lengths_p2020_09_28_123456789012345678901234567890123"
ALTER TABLE partition_lengths RENAME TO partition_lengths_12345678901234567890123456789012345678901234567890;
NOTICE: identifier "partition_lengths_12345678901234567890123456789012345678901234567890" will be truncated to "partition_lengths_123456789012345678901234567890123456789012345"
-- verify alter_distributed_table works with long partitioned table names
SELECT alter_distributed_table('partition_lengths_12345678901234567890123456789012345678901234567890', shard_count := 17, cascade_to_colocated := false);
NOTICE: converting the partitions of alter_distributed_table.partition_lengths_123456789012345678901234567890123456789012345
NOTICE: creating a new table for alter_distributed_table.partition_lengths_p2020_09_28
NOTICE: moving the data of alter_distributed_table.partition_lengths_p2020_09_28
NOTICE: dropping the old alter_distributed_table.partition_lengths_p2020_09_28
NOTICE: renaming the new table to alter_distributed_table.partition_lengths_p2020_09_28
NOTICE: creating a new table for alter_distributed_table.partition_lengths_123456789012345678901234567890123456789012345
NOTICE: dropping the old alter_distributed_table.partition_lengths_123456789012345678901234567890123456789012345
NOTICE: renaming the new table to alter_distributed_table.partition_lengths_123456789012345678901234567890123456789012345
 alter_distributed_table
---------------------------------------------------------------------

(1 row)

SET client_min_messages TO WARNING;
DROP SCHEMA alter_distributed_table CASCADE;

@@ -1,12 +0,0 @@
-- test for Postgres version
-- should error before PG12
CREATE TABLE alter_am_pg_version_table (a INT);
SELECT alter_table_set_access_method('alter_am_pg_version_table', 'columnar');
ERROR: table access methods are not supported for Postgres versions earlier than 12
DROP TABLE alter_am_pg_version_table;
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
\gset
\if :server_version_above_eleven
\else
\q
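-- Illustrative sketch, not captured output: the \gset gate used by the
-- removed file generalizes to any major-version cutoff (PG13 here is an
-- arbitrary example; the variable name is hypothetical).
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 13 AS server_version_ge_13
\gset
\if :server_version_ge_13
\echo running PG13-only statements
\endif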
@@ -335,7 +335,6 @@ NOTICE: executing the command locally: SELECT ref.a, ref.b, local.x, local.y FR
(1 row)

ROLLBACK;
set citus.enable_cte_inlining to off;
BEGIN;
SELECT count(*) FROM test;
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503000 test WHERE true

@@ -348,11 +347,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinato
-- we won't see the modifying cte in this query because we will use local execution and
-- in postgres we wouldn't see this modifying cte, so it is consistent with postgres.
WITH a AS (SELECT count(*) FROM test), b AS (INSERT INTO local VALUES (3,2) RETURNING *), c AS (INSERT INTO ref VALUES (3,2) RETURNING *), d AS (SELECT count(*) FROM ref JOIN local ON (a = x)) SELECT * FROM a, b, c, d ORDER BY x,y,a,b;
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 (a, b) VALUES (3, 2) RETURNING a, b
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503000 test WHERE true
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503003 test WHERE true
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 (a, b) VALUES (3, 2) RETURNING a, b
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN coordinator_shouldhaveshards.local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN (SELECT local_1.x, NULL::integer AS y FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) local_1) local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
 count | x | y | a | b | count
---------------------------------------------------------------------
   100 | 3 | 2 | 3 | 2 |     0

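-- Illustrative sketch, not captured output: the "executing the command
-- locally" notices above appear because the coordinator itself holds shards
-- in this test; logging of locally executed commands is controlled by a GUC.
SET citus.log_local_commands TO on;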
@ -369,11 +368,11 @@ NOTICE: executing the command locally: SELECT ref.a, ref.b, local.x, local.y FR
|
|||
-- we wont see the modifying cte in this query because we will use local execution and
|
||||
-- in postgres we wouldn't see this modifying cte, so it is consistent with postgres.
|
||||
WITH a AS (SELECT count(*) FROM test), b AS (INSERT INTO local VALUES (3,2) RETURNING *), c AS (INSERT INTO ref VALUES (3,2) RETURNING *), d AS (SELECT count(*) FROM ref JOIN local ON (a = x)) SELECT * FROM a, b, c, d ORDER BY x,y,a,b;
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 (a, b) VALUES (3, 2) RETURNING a, b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503000 test WHERE true
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503003 test WHERE true
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 (a, b) VALUES (3, 2) RETURNING a, b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN coordinator_shouldhaveshards.local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
|
||||
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN (SELECT local_1.x, NULL::integer AS y FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) local_1) local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
|
||||
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
|
||||
count | x | y | a | b | count
|
||||
---------------------------------------------------------------------
|
||||
100 | 3 | 2 | 3 | 2 | 0
|
||||
|
@ -384,11 +383,11 @@ BEGIN;
|
|||
-- we wont see the modifying cte in this query because we will use local execution and
|
||||
-- in postgres we wouldn't see this modifying cte, so it is consistent with postgres.
|
||||
WITH a AS (SELECT count(*) FROM test), b AS (INSERT INTO local VALUES (3,2) RETURNING *), c AS (INSERT INTO ref VALUES (3,2) RETURNING *), d AS (SELECT count(*) FROM ref JOIN local ON (a = x)) SELECT * FROM a, b, c, d ORDER BY x,y,a,b;
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 (a, b) VALUES (3, 2) RETURNING a, b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503000 test WHERE true
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503003 test WHERE true
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 (a, b) VALUES (3, 2) RETURNING a, b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN coordinator_shouldhaveshards.local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
|
||||
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN (SELECT local_1.x, NULL::integer AS y FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) local_1) local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
|
||||
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
|
||||
count | x | y | a | b | count
|
||||
---------------------------------------------------------------------
|
||||
100 | 3 | 2 | 3 | 2 | 0
|
||||
|
@ -399,12 +398,12 @@ BEGIN;
|
|||
-- we wont see the modifying cte in this query because we will use local execution and
|
||||
-- in postgres we wouldn't see this modifying cte, so it is consistent with postgres.
|
||||
WITH a AS (SELECT count(*) FROM test), b AS (INSERT INTO local VALUES (3,2) RETURNING *), c AS (INSERT INTO ref SELECT *,* FROM generate_series(1,10) RETURNING *), d AS (SELECT count(*) FROM ref JOIN local ON (a = x)) SELECT * FROM a, b, c, d ORDER BY x,y,a,b;
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503000 test WHERE true
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503003 test WHERE true
|
||||
NOTICE: executing the copy locally for colocated file with shard xxxxx
|
||||
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_1503020'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) RETURNING citus_table_alias.a, citus_table_alias.b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN coordinator_shouldhaveshards.local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
|
||||
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503000 test WHERE true
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503003 test WHERE true
|
||||
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN (SELECT local_1.x, NULL::integer AS y FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) local_1) local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
|
||||
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
|
||||
count | x | y | a | b | count
|
||||
---------------------------------------------------------------------
|
||||
100 | 3 | 2 | 1 | 1 | 0
|
||||
|
@ -434,11 +433,11 @@ NOTICE: executing the command locally: SELECT ref.a, ref.b, local.x, local.y FR
-- we wont see the modifying cte in this query because we will use local execution and
-- in postgres we wouldn't see this modifying cte, so it is consistent with postgres.
WITH a AS (SELECT count(*) FROM test), b AS (INSERT INTO local VALUES (3,2) RETURNING *), c AS (INSERT INTO ref VALUES (3,2) RETURNING *), d AS (SELECT count(*) FROM ref JOIN local ON (a = x)) SELECT * FROM a, b, c, d ORDER BY x,y,a,b;
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 (a, b) VALUES (3, 2) RETURNING a, b
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503000 test WHERE true
NOTICE: executing the command locally: SELECT count(*) AS count FROM coordinator_shouldhaveshards.test_1503003 test WHERE true
NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_1503020 (a, b) VALUES (3, 2) RETURNING a, b
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN coordinator_shouldhaveshards.local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
NOTICE: executing the command locally: SELECT count(*) AS count FROM (coordinator_shouldhaveshards.ref_1503020 ref JOIN (SELECT local_1.x, NULL::integer AS y FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) local_1) local ON ((ref.a OPERATOR(pg_catalog.=) local.x)))
NOTICE: executing the command locally: SELECT a.count, b.x, b.y, c.a, c.b, d.count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) a, (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) b, (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) c, (SELECT intermediate_result.count FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) d ORDER BY b.x, b.y, c.a, c.b
count | x | y | a | b | count
---------------------------------------------------------------------
100 | 3 | 2 | 3 | 2 | 1

@ -489,7 +488,7 @@ NOTICE: executing the command locally: SELECT ref.a, ref.b, local.x, local.y, c
-- full router query with CTE and local
WITH cte_1 AS (SELECT * FROM ref LIMIT 1)
SELECT * FROM ref JOIN local ON (a = x) JOIN cte_1 ON (local.x = cte_1.a);
NOTICE: executing the command locally: WITH cte_1 AS (SELECT ref_1.a, ref_1.b FROM coordinator_shouldhaveshards.ref_1503020 ref_1 LIMIT 1) SELECT ref.a, ref.b, local.x, local.y, cte_1.a, cte_1.b FROM ((coordinator_shouldhaveshards.ref_1503020 ref JOIN coordinator_shouldhaveshards.local ON ((ref.a OPERATOR(pg_catalog.=) local.x))) JOIN cte_1 ON ((local.x OPERATOR(pg_catalog.=) cte_1.a)))
NOTICE: executing the command locally: SELECT ref.a, ref.b, local.x, local.y, cte_1.a, cte_1.b FROM ((coordinator_shouldhaveshards.ref_1503020 ref JOIN coordinator_shouldhaveshards.local ON ((ref.a OPERATOR(pg_catalog.=) local.x))) JOIN (SELECT ref_1.a, ref_1.b FROM coordinator_shouldhaveshards.ref_1503020 ref_1 LIMIT 1) cte_1 ON ((local.x OPERATOR(pg_catalog.=) cte_1.a)))
a | b | x | y | a | b
---------------------------------------------------------------------
1 | 2 | 1 | 2 | 1 | 2

@ -531,7 +530,6 @@ NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1
(1 row)

ROLLBACK;
RESET citus.enable_cte_inlining;
CREATE table ref_table(x int PRIMARY KEY, y int);
-- this will be replicated to the coordinator because of add_coordinator test
SELECT create_reference_table('ref_table');

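For context: the "executing the command locally" notices in these hunks come from Citus's local-execution path; the coordinator hosts shard placements in this test, so fragments that target those shards run over the local backend rather than a remote connection. A minimal sketch of surfacing that path on such a cluster, not taken from this diff; it assumes the citus.log_local_commands setting is what you want to toggle:

SET citus.log_local_commands TO on;  -- emit a notice for each locally executed fragment
SELECT count(*) FROM test;           -- fragments on coordinator-resident shards run locally
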
@ -342,7 +342,7 @@ BEGIN;
$update_value$ LANGUAGE plpgsql;
CREATE TRIGGER update_value_dist
AFTER INSERT ON citus_local_table_6
FOR EACH ROW EXECUTE FUNCTION update_value();
FOR EACH ROW EXECUTE PROCEDURE update_value();
-- show that we error out as we don't supprt triggers on distributed tables
SELECT create_distributed_table('citus_local_table_6', 'col_1');
ERROR: cannot distribute relation "citus_local_table_6" because it has triggers

File diff suppressed because it is too large

@ -1,646 +0,0 @@
--
-- CUSTOM_AGGREGATE_SUPPORT
--
-- Create HLL extension if present, print false result otherwise
SELECT CASE WHEN COUNT(*) > 0 THEN
'CREATE EXTENSION HLL'
ELSE 'SELECT false AS hll_present' END
AS create_cmd FROM pg_available_extensions()
WHERE name = 'hll'
\gset
:create_cmd;
SET citus.shard_count TO 4;
set citus.coordinator_aggregation_strategy to 'disabled';
CREATE TABLE raw_table (day date, user_id int);
CREATE TABLE daily_uniques(day date, unique_users hll);
SELECT create_distributed_table('raw_table', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('daily_uniques', 'day');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO raw_table
SELECT day, user_id % 19
FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day),
generate_series(1,100) as g(user_id);
INSERT INTO raw_table
SELECT day, user_id % 13
FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day),
generate_series(1,100) as g(user_id);
-- Run hll on raw data
SELECT hll_cardinality(hll_union_agg(agg))
FROM (
SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg
FROM raw_table)a;
hll_cardinality
---------------------------------------------------------------------
19
(1 row)

-- Aggregate the data into daily_uniques
INSERT INTO daily_uniques
SELECT day, hll_add_agg(hll_hash_integer(user_id))
FROM raw_table
GROUP BY 1;
-- Basic hll_cardinality check on aggregated data
SELECT day, hll_cardinality(unique_users)
FROM daily_uniques
WHERE day >= '2018-06-20' and day <= '2018-06-30'
ORDER BY 2 DESC,1
LIMIT 10;
day | hll_cardinality
---------------------------------------------------------------------
06-20-2018 | 19
06-21-2018 | 19
06-22-2018 | 19
06-23-2018 | 19
06-24-2018 | 19
06-25-2018 | 13
06-26-2018 | 13
06-27-2018 | 13
06-28-2018 | 13
06-29-2018 | 13
(10 rows)

-- Union aggregated data for one week
SELECT hll_cardinality(hll_union_agg(unique_users))
FROM daily_uniques
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date;
hll_cardinality
---------------------------------------------------------------------
19
(1 row)

SELECT EXTRACT(MONTH FROM day) AS month, hll_cardinality(hll_union_agg(unique_users))
FROM daily_uniques
WHERE day >= '2018-06-23' AND day <= '2018-07-01'
GROUP BY 1
ORDER BY 1;
month | hll_cardinality
---------------------------------------------------------------------
6 | 19
7 | 13
(2 rows)

SELECT day, hll_cardinality(hll_union_agg(unique_users) OVER seven_days)
FROM daily_uniques
WINDOW seven_days AS (ORDER BY day ASC ROWS 6 PRECEDING)
ORDER BY 1;
day | hll_cardinality
---------------------------------------------------------------------
05-24-2018 | 19
05-25-2018 | 19
05-26-2018 | 19
05-27-2018 | 19
05-28-2018 | 19
05-29-2018 | 19
05-30-2018 | 19
05-31-2018 | 19
06-01-2018 | 19
06-02-2018 | 19
06-03-2018 | 19
06-04-2018 | 19
06-05-2018 | 19
06-06-2018 | 19
06-07-2018 | 19
06-08-2018 | 19
06-09-2018 | 19
06-10-2018 | 19
06-11-2018 | 19
06-12-2018 | 19
06-13-2018 | 19
06-14-2018 | 19
06-15-2018 | 19
06-16-2018 | 19
06-17-2018 | 19
06-18-2018 | 19
06-19-2018 | 19
06-20-2018 | 19
06-21-2018 | 19
06-22-2018 | 19
06-23-2018 | 19
06-24-2018 | 19
06-25-2018 | 19
06-26-2018 | 19
06-27-2018 | 19
06-28-2018 | 19
06-29-2018 | 19
06-30-2018 | 19
07-01-2018 | 13
07-02-2018 | 13
07-03-2018 | 13
07-04-2018 | 13
07-05-2018 | 13
07-06-2018 | 13
07-07-2018 | 13
07-08-2018 | 13
07-09-2018 | 13
07-10-2018 | 13
(48 rows)

SELECT day, (hll_cardinality(hll_union_agg(unique_users) OVER two_days)) - hll_cardinality(unique_users) AS lost_uniques
FROM daily_uniques
WINDOW two_days AS (ORDER BY day ASC ROWS 1 PRECEDING)
ORDER BY 1;
day | lost_uniques
---------------------------------------------------------------------
05-24-2018 | 0
05-25-2018 | 0
05-26-2018 | 0
05-27-2018 | 0
05-28-2018 | 0
05-29-2018 | 0
05-30-2018 | 0
05-31-2018 | 0
06-01-2018 | 0
06-02-2018 | 0
06-03-2018 | 0
06-04-2018 | 0
06-05-2018 | 0
06-06-2018 | 0
06-07-2018 | 0
06-08-2018 | 0
06-09-2018 | 0
06-10-2018 | 0
06-11-2018 | 0
06-12-2018 | 0
06-13-2018 | 0
06-14-2018 | 0
06-15-2018 | 0
06-16-2018 | 0
06-17-2018 | 0
06-18-2018 | 0
06-19-2018 | 0
06-20-2018 | 0
06-21-2018 | 0
06-22-2018 | 0
06-23-2018 | 0
06-24-2018 | 0
06-25-2018 | 6
06-26-2018 | 0
06-27-2018 | 0
06-28-2018 | 0
06-29-2018 | 0
06-30-2018 | 0
07-01-2018 | 0
07-02-2018 | 0
07-03-2018 | 0
07-04-2018 | 0
07-05-2018 | 0
07-06-2018 | 0
07-07-2018 | 0
07-08-2018 | 0
07-09-2018 | 0
07-10-2018 | 0
(48 rows)

-- Test disabling hash_agg on coordinator query
SET citus.explain_all_tasks to true;
SET hll.force_groupagg to OFF;
EXPLAIN(COSTS OFF)
SELECT
day, hll_union_agg(unique_users)
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
(23 rows)

SET hll.force_groupagg to ON;
EXPLAIN(COSTS OFF)
SELECT
day, hll_union_agg(unique_users)
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
(23 rows)

-- Test disabling hash_agg with operator on coordinator query
SET hll.force_groupagg to OFF;
EXPLAIN(COSTS OFF)
SELECT
day, hll_union_agg(unique_users) || hll_union_agg(unique_users)
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
(23 rows)

SET hll.force_groupagg to ON;
EXPLAIN(COSTS OFF)
SELECT
day, hll_union_agg(unique_users) || hll_union_agg(unique_users)
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
(23 rows)

-- Test disabling hash_agg with expression on coordinator query
SET hll.force_groupagg to OFF;
EXPLAIN(COSTS OFF)
SELECT
day, hll_cardinality(hll_union_agg(unique_users))
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
(23 rows)

SET hll.force_groupagg to ON;
EXPLAIN(COSTS OFF)
SELECT
day, hll_cardinality(hll_union_agg(unique_users))
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
(23 rows)

-- Test disabling hash_agg with having
SET hll.force_groupagg to OFF;
EXPLAIN(COSTS OFF)
SELECT
day, hll_cardinality(hll_union_agg(unique_users))
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
(23 rows)

SET hll.force_groupagg to ON;
EXPLAIN(COSTS OFF)
SELECT
day, hll_cardinality(hll_union_agg(unique_users))
FROM
daily_uniques
GROUP BY(1)
HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
(27 rows)

DROP TABLE raw_table;
DROP TABLE daily_uniques;
-- Check if TopN aggregates work as expected
-- Create TopN extension if present, print false result otherwise
SELECT CASE WHEN COUNT(*) > 0 THEN
'CREATE EXTENSION TOPN'
ELSE 'SELECT false AS topn_present' END
AS create_topn FROM pg_available_extensions()
WHERE name = 'topn'
\gset
:create_topn;
CREATE TABLE customer_reviews (day date, user_id int, review int);
CREATE TABLE popular_reviewer(day date, reviewers jsonb);
SELECT create_distributed_table('customer_reviews', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('popular_reviewer', 'day');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO customer_reviews
SELECT day, user_id % 7, review % 5
FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day),
generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review);
INSERT INTO customer_reviews
SELECT day, user_id % 13, review % 3
FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day),
generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review);
-- Run topn on raw data
SELECT (topn(agg, 10)).*
FROM (
SELECT topn_add_agg(user_id::text) AS agg
FROM customer_reviews
)a
ORDER BY 2 DESC, 1;
item | frequency
---------------------------------------------------------------------
1 | 7843
2 | 7843
3 | 6851
4 | 6851
0 | 5890
5 | 5890
6 | 5890
7 | 1922
8 | 1922
9 | 1922
(10 rows)

-- Aggregate the data into popular_reviewer
INSERT INTO popular_reviewer
SELECT day, topn_add_agg(user_id::text)
FROM customer_reviews
GROUP BY 1;
-- Basic topn check on aggregated data
SELECT day, (topn(reviewers, 10)).*
FROM popular_reviewer
WHERE day >= '2018-06-20' and day <= '2018-06-30'
ORDER BY 3 DESC, 1, 2
LIMIT 10;
day | item | frequency
---------------------------------------------------------------------
06-20-2018 | 1 | 248
06-20-2018 | 2 | 248
06-21-2018 | 1 | 248
06-21-2018 | 2 | 248
06-22-2018 | 1 | 248
06-22-2018 | 2 | 248
06-23-2018 | 1 | 248
06-23-2018 | 2 | 248
06-24-2018 | 1 | 248
06-24-2018 | 2 | 248
(10 rows)

-- Union aggregated data for one week
SELECT (topn(agg, 10)).*
FROM (
SELECT topn_union_agg(reviewers) AS agg
FROM popular_reviewer
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date
)a
ORDER BY 2 DESC, 1;
item | frequency
---------------------------------------------------------------------
1 | 1240
2 | 1240
0 | 992
3 | 992
4 | 992
5 | 992
6 | 992
(7 rows)

SELECT month, (topn(agg, 5)).*
FROM (
SELECT EXTRACT(MONTH FROM day) AS month, topn_union_agg(reviewers) AS agg
FROM popular_reviewer
WHERE day >= '2018-06-23' AND day <= '2018-07-01'
GROUP BY 1
ORDER BY 1
)a
ORDER BY 1, 3 DESC, 2;
month | item | frequency
---------------------------------------------------------------------
6 | 1 | 1054
6 | 2 | 1054
6 | 3 | 992
6 | 4 | 992
6 | 0 | 744
7 | 1 | 93
7 | 2 | 93
7 | 3 | 93
7 | 4 | 93
7 | 8 | 62
(10 rows)

-- TODO the following queries will be supported after we fix #2265
-- They work for PG9.6 but not for PG10
SELECT (topn(topn_union_agg(reviewers), 10)).*
FROM popular_reviewer
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date
ORDER BY 2 DESC, 1;
ERROR: set-valued function called in context that cannot accept a set
SELECT (topn(topn_add_agg(user_id::text), 10)).*
FROM customer_reviews
ORDER BY 2 DESC, 1;
ERROR: set-valued function called in context that cannot accept a set
SELECT day, (topn(agg, 10)).*
FROM (
SELECT day, topn_union_agg(reviewers) OVER seven_days AS agg
FROM popular_reviewer
WINDOW seven_days AS (ORDER BY day ASC ROWS 6 PRECEDING)
)a
ORDER BY 3 DESC, 1, 2
LIMIT 10;
day | item | frequency
---------------------------------------------------------------------
06-16-2018 | 1 | 1736
06-16-2018 | 2 | 1736
06-17-2018 | 1 | 1736
06-17-2018 | 2 | 1736
06-18-2018 | 1 | 1736
06-18-2018 | 2 | 1736
06-19-2018 | 1 | 1736
06-19-2018 | 2 | 1736
06-20-2018 | 1 | 1736
06-20-2018 | 2 | 1736
(10 rows)

SELECT day, (topn(topn_add_agg(user_id::text) OVER seven_days, 10)).*
FROM customer_reviews
WINDOW seven_days AS (ORDER BY day ASC ROWS 6 PRECEDING)
ORDER BY 3 DESC, 1, 2
LIMIT 10;
ERROR: set-valued function called in context that cannot accept a set
DROP TABLE customer_reviews;
DROP TABLE popular_reviewer;

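For reference, the deleted expected file above exercised the usual HLL rollup pattern: hll_add_agg over raw rows, then hll_union_agg over the per-day sketches. A minimal sketch of that pattern, not part of this diff; it assumes the hll extension is installed, and the table and column names below are hypothetical:

CREATE EXTENSION IF NOT EXISTS hll;
CREATE TABLE events (day date, user_id int);
CREATE TABLE daily_rollup (day date, uniques hll);
-- roll raw rows up into one sketch per day
INSERT INTO daily_rollup
SELECT day, hll_add_agg(hll_hash_integer(user_id))
FROM events GROUP BY 1;
-- merge per-day sketches to estimate distinct users over a range
SELECT hll_cardinality(hll_union_agg(uniques))
FROM daily_rollup WHERE day >= date '2018-05-24';
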
@ -1,75 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
version_above_eleven
---------------------------------------------------------------------
f
(1 row)

SET citus.next_shard_id TO 20040000;
CREATE SCHEMA xact_enum_type;
SET search_path TO xact_enum_type;
SET citus.shard_count TO 4;
-- transaction block with simple type
BEGIN;
CREATE TYPE xact_enum_edit AS ENUM ('yes', 'no');
CREATE TABLE t1 (a int PRIMARY KEY, b xact_enum_edit);
SELECT create_distributed_table('t1','a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO t1 VALUES (1, 'yes');
SELECT * FROM t1;
a | b
---------------------------------------------------------------------
1 | yes
(1 row)

COMMIT;
BEGIN;
ALTER TYPE xact_enum_edit ADD VALUE 'maybe';
ERROR: ALTER TYPE ... ADD cannot run inside a transaction block
ABORT;
-- maybe should not be on the workers
SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
string_agg
---------------------------------------------------------------------
yes,no
(1 row)

SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"yes,no")
(localhost,57638,t,"yes,no")
(2 rows)

BEGIN;
ALTER TYPE xact_enum_edit ADD VALUE 'maybe';
ERROR: ALTER TYPE ... ADD cannot run inside a transaction block
COMMIT;
-- maybe should be on the workers (pg12 and above)
SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
string_agg
---------------------------------------------------------------------
yes,no
(1 row)

SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"yes,no")
(localhost,57638,t,"yes,no")
(2 rows)

-- clear objects
SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA xact_enum_type CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(2 rows)

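The deleted file above is the PostgreSQL 11 variant of this test, where ALTER TYPE ... ADD VALUE errors inside a transaction block; per the test's own comment, on PostgreSQL 12 and above the statement is accepted, with the restriction that the new label cannot be used before the transaction commits. A minimal sketch of the newer behavior, not from this diff, using a hypothetical type name:

BEGIN;
CREATE TYPE status AS ENUM ('yes', 'no');
COMMIT;
BEGIN;
ALTER TYPE status ADD VALUE 'maybe';  -- accepted on PG12+, errors on PG11
COMMIT;
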
@ -5,7 +5,7 @@ SET citus.shard_replication_factor to 1;
SET citus.next_shard_id TO 16000000;
-- CTE inlining should not happen because
-- the tests rely on intermediate results
SET citus.enable_cte_inlining TO false;
-- That's why we use MATERIALIZED CTEs in the test file
-- prevent using locally executing the intermediate results
SET citus.task_assignment_policy TO "round-robin";
SELECT pg_backend_pid() as pid \gset

@ -31,11 +31,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');

(1 row)

WITH cte AS (
WITH local_cte AS (
WITH cte AS MATERIALIZED (
WITH local_cte AS MATERIALIZED (
SELECT * FROM users_table_local
),
dist_cte AS (
dist_cte AS MATERIALIZED (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id

@ -65,11 +65,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.even

(1 row)

WITH cte AS (
WITH local_cte AS (
WITH cte AS MATERIALIZED (
WITH local_cte AS MATERIALIZED (
SELECT * FROM users_table_local
),
dist_cte AS (
dist_cte AS MATERIALIZED (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id

@ -98,11 +98,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").k

(1 row)

WITH cte AS (
WITH local_cte AS (
WITH cte AS MATERIALIZED (
WITH local_cte AS MATERIALIZED (
SELECT * FROM users_table_local
),
dist_cte AS (
dist_cte AS MATERIALIZED (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id

@ -131,11 +131,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');

(1 row)

WITH cte AS (
WITH local_cte AS (
WITH cte AS MATERIALIZED (
WITH local_cte AS MATERIALIZED (
SELECT * FROM users_table_local
),
dist_cte AS (
dist_cte AS MATERIALIZED (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id

@ -162,11 +162,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM").cancel(' || :p

(1 row)

WITH cte AS (
WITH local_cte AS (
WITH cte AS MATERIALIZED (
WITH local_cte AS MATERIALIZED (
SELECT * FROM users_table_local
),
dist_cte AS (
dist_cte AS MATERIALIZED (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id

@ -193,11 +193,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").c

(1 row)

WITH cte AS (
WITH local_cte AS (
WITH cte AS MATERIALIZED (
WITH local_cte AS MATERIALIZED (
SELECT * FROM users_table_local
),
dist_cte AS (
dist_cte AS MATERIALIZED (
SELECT user_id FROM events_table
)
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id

@ -238,7 +238,7 @@ SELECT * FROM users_table ORDER BY 1, 2;
(5 rows)

-- following will delete and insert the same rows
WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
INSERT INTO users_table SELECT * FROM cte_delete;
-- verify contents are the same
SELECT * FROM users_table ORDER BY 1, 2;

@ -258,7 +258,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()');

(1 row)

WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
INSERT INTO users_table SELECT * FROM cte_delete;
ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally

@ -287,7 +287,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');

(1 row)

WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
INSERT INTO users_table SELECT * FROM cte_delete;
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally

@ -317,7 +317,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").cancel(' || :pid || '

(1 row)

WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
INSERT INTO users_table SELECT * FROM cte_delete;
ERROR: canceling statement due to user request
-- verify contents are the same

@ -344,7 +344,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');

(1 row)

WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
INSERT INTO users_table SELECT * FROM cte_delete;
ERROR: canceling statement due to user request
-- verify contents are the same

@ -373,7 +373,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()');

BEGIN;
SET LOCAL citus.multi_shard_modify_mode = 'sequential';
WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
INSERT INTO users_table SELECT * FROM cte_delete;
ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally

@ -32,8 +32,6 @@ SELECT create_reference_table('ref_table');

(1 row)

-- prevent PG 11 - PG 12 outputs to diverge
SET citus.enable_cte_inlining TO false;
-- load some data
INSERT INTO table_1 VALUES (1, '1'), (2, '2'), (3, '3'), (4, '4');
INSERT INTO table_2 VALUES (3, '3'), (4, '4'), (5, '5'), (6, '6');

@ -43,7 +41,7 @@ INSERT INTO ref_table VALUES (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'),
SET client_min_messages TO DEBUG1;
-- a very basic case, where the intermediate result
-- should go to both workers
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key FROM table_1 WHERE value IN ('3', '4'))
SELECT
count(*)

@ -61,7 +59,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-- a very basic case, where the intermediate result
-- should only go to one worker because the final query is a router
-- we use random() to prevent postgres inline the CTE(s)
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4'))
SELECT
count(*)

@ -78,7 +76,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-- a similar query, but with a reference table now
-- given that reference tables are replicated to all nodes
-- we have to broadcast to all nodes
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4'))
SELECT
count(*)

@ -95,9 +93,9 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx

-- a similar query as above, but this time use the CTE inside
-- another CTE
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1)
SELECT
count(*)

@ -115,9 +113,9 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx

-- the second CTE does a join with a distributed table
-- and the final query is a router query
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key))
SELECT
count(*)

@ -137,9 +135,9 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-- the first CTE is used both within second CTE and the final query
-- the second CTE does a join with a distributed table
-- and the final query is a router query
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key))
SELECT
count(*)

@ -159,9 +157,9 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-- the first CTE is used both within second CTE and the final query
-- the second CTE does a join with a distributed table but a router query on a worker
-- and the final query is another router query on another worker
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key) WHERE table_2.key = 1)
SELECT
count(*)

@ -182,9 +180,9 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-- the second CTE does a join with a distributed table but a router query on a worker
-- and the final query is a router query on the same worker, so the first result is only
-- broadcasted to a single node
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key) WHERE table_2.key = 1)
SELECT
count(*)

@ -201,9 +199,9 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
(1 row)

-- the same query with the above, but the final query is hitting all shards
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key))
SELECT
count(*)

@ -224,9 +222,9 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-- even if we add a filter on the first query and make it a router query,
-- the first intermediate result still hits all workers because of the final
-- join is hitting all workers
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key) WHERE table_2.key = 3)
SELECT
count(*)

@ -247,7 +245,7 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-- the reference table is joined with a distributed table and an intermediate
-- result, but the distributed table hits all shards, so the intermediate
-- result is sent to all nodes
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM ref_table WHERE value IN ('3', '4'))
SELECT
count(*)

@ -264,7 +262,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx

-- similar query as above, but this time the whole query is a router
-- query, so no intermediate results
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM ref_table WHERE value IN ('3', '4'))
SELECT
count(*)

@ -279,9 +277,9 @@ FROM
-- so the first CTE should only be broadcasted to that node
-- since the final query doesn't have a join, it should simply be broadcasted
-- to one node
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key) WHERE key = 1)
SELECT
count(*)

@ -299,10 +297,10 @@ DEBUG: Subplan XXX_2 will be written to local file

-- the same query inlined inside a CTE, and the final query has a
-- join with a distributed table
WITH top_cte as (
WITH some_values_1 AS
WITH top_cte as MATERIALIZED (
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key) WHERE key = 1)
SELECT
DISTINCT key

@ -313,7 +311,7 @@ SELECT
count(*)
FROM
top_cte JOIN table_2 USING (key);
DEBUG: generating subplan XXX_1 for CTE top_cte: WITH some_values_1 AS (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))), some_values_2 AS (SELECT some_values_1.key, random() AS random FROM (some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)) SELECT DISTINCT key FROM some_values_2
DEBUG: generating subplan XXX_1 for CTE top_cte: WITH some_values_1 AS MATERIALIZED (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))), some_values_2 AS MATERIALIZED (SELECT some_values_1.key, random() AS random FROM (some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)) SELECT DISTINCT key FROM some_values_2
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2

@ -329,10 +327,10 @@ DEBUG: Subplan XXX_2 will be written to local file

-- very much the same query, but this time the top query is also a router query
-- on a single worker, so all intermediate results only hit a single node
WITH top_cte as (
WITH some_values_1 AS
WITH top_cte as MATERIALIZED (
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key) WHERE key = 1)
SELECT
DISTINCT key

@ -343,7 +341,7 @@ SELECT
count(*)
FROM
top_cte JOIN table_2 USING (key) WHERE table_2.key = 2;
DEBUG: generating subplan XXX_1 for CTE top_cte: WITH some_values_1 AS (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))), some_values_2 AS (SELECT some_values_1.key, random() AS random FROM (some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)) SELECT DISTINCT key FROM some_values_2
DEBUG: generating subplan XXX_1 for CTE top_cte: WITH some_values_1 AS MATERIALIZED (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))), some_values_2 AS MATERIALIZED (SELECT some_values_1.key, random() AS random FROM (some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)) SELECT DISTINCT key FROM some_values_2
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2

@ -358,11 +356,11 @@ DEBUG: Subplan XXX_2 will be written to local file

-- some_values_1 is first used by a single shard-query, and than with a multi-shard
-- CTE, finally a cartesian product join
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1 JOIN table_2 USING (key) WHERE key = 1),
some_values_3 AS
some_values_3 AS MATERIALIZED
(SELECT key FROM (some_values_2 JOIN table_2 USING (key)) JOIN some_values_1 USING (key))
SELECT * FROM some_values_3 JOIN ref_table ON (true);
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))

@ -381,9 +379,9 @@ DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx

-- join on intermediate results, so should only
-- go to a single node
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM table_2 WHERE value IN ('3', '4'))
SELECT count(*) FROM some_values_2 JOIN some_values_1 USING (key);
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))

@ -398,9 +396,9 @@ DEBUG: Subplan XXX_2 will be written to local file

-- same query with WHERE false make sure that we're not broken
-- for such edge cases
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM table_2 WHERE value IN ('3', '4'))
SELECT count(*) FROM some_values_2 JOIN some_values_1 USING (key) WHERE false;
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))

@ -415,11 +413,11 @@ DEBUG: Subplan XXX_2 will be written to local file

-- do not use some_values_2 at all, so only 2 intermediate results are
-- broadcasted
WITH some_values_1 AS
WITH some_values_1 AS MATERIALIZED
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4')),
some_values_2 AS
some_values_2 AS MATERIALIZED
(SELECT key, random() FROM some_values_1),
some_values_3 AS
some_values_3 AS MATERIALIZED
(SELECT key, random() FROM some_values_1)
SELECT
count(*)

@ -566,13 +564,13 @@ DEBUG: Subplan XXX_2 will be written to local file
(0 rows)

-- the intermediate results should just hit a single worker
WITH cte_1 AS
WITH cte_1 AS MATERIALIZED
(
(SELECT key FROM table_1 WHERE key = 1)
INTERSECT
(SELECT key FROM table_1 WHERE key = 2)
),
cte_2 AS
cte_2 AS MATERIALIZED
(
(SELECT key FROM table_1 WHERE key = 3)
INTERSECT

@ -599,13 +597,13 @@ DEBUG: Subplan XXX_2 will be written to local file
-- we join the results with distributed tables
-- so cte_1 should hit all workers, but still the
-- others should hit single worker each
WITH cte_1 AS
WITH cte_1 AS MATERIALIZED
(
(SELECT key FROM table_1 WHERE key = 1)
INTERSECT
(SELECT key FROM table_1 WHERE key = 2)
),
cte_2 AS
cte_2 AS MATERIALIZED
(
SELECT count(*) FROM table_1 JOIN cte_1 USING (key)
)

@ -668,10 +666,10 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-- however, the subquery in WHERE clause of the DELETE query is broadcasted to all
-- nodes
BEGIN;
WITH select_data AS (
WITH select_data AS MATERIALIZED (
SELECT * FROM table_1
),
raw_data AS (
raw_data AS MATERIALIZED (
DELETE FROM table_2 WHERE key >= (SELECT min(key) FROM select_data WHERE key > 1) RETURNING *
)
SELECT * FROM raw_data;

@ -698,10 +696,10 @@ ROLLBACK;
-- however, the subquery in WHERE clause of the DELETE query is broadcasted to all
-- nodes
BEGIN;
WITH select_data AS (
WITH select_data AS MATERIALIZED (
SELECT * FROM table_1
),
raw_data AS (
raw_data AS MATERIALIZED (
DELETE FROM table_2 WHERE value::int >= (SELECT min(key) FROM select_data WHERE key > 1 + random()) RETURNING *
)
SELECT * FROM raw_data;

@ -726,10 +724,10 @@ ROLLBACK;
-- now, we need only two intermediate results as the subquery in WHERE clause is
-- router plannable
BEGIN;
WITH select_data AS (
WITH select_data AS MATERIALIZED (
SELECT * FROM table_1
),
raw_data AS (
raw_data AS MATERIALIZED (
DELETE FROM table_2 WHERE value::int >= (SELECT min(key) FROM table_1 WHERE key > random()) AND key = 6 RETURNING *
)
SELECT * FROM raw_data;

@ -766,13 +764,13 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
INSERT INTO table_1
SELECT * FROM table_2 where key = 1 AND
value::int IN
(WITH cte_1 AS
(WITH cte_1 AS MATERIALIZED
(
(SELECT key FROM table_1 WHERE key = 1)
INTERSECT
(SELECT key FROM table_1 WHERE key = 2)
),
cte_2 AS
cte_2 AS MATERIALIZED
(
(SELECT key FROM table_1 WHERE key = 3)
INTERSECT

@ -800,13 +798,13 @@ DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-- hits all the shards because table_2.key != 1
INSERT INTO table_1
SELECT table_2.* FROM table_2,
(WITH cte_1 AS
(WITH cte_1 AS MATERIALIZED
(
(SELECT key FROM table_1 WHERE key = 1)
INTERSECT
(SELECT key FROM table_1 WHERE key = 2)
),
cte_2 AS
cte_2 AS MATERIALIZED
(
(SELECT key FROM table_1 WHERE key = 3)
INTERSECT

@ -924,8 +922,9 @@ FROM
WHERE
range_column IN ('A', 'E') AND
range_partitioned.data IN (SELECT data FROM some_data);
DEBUG: generating subplan XXX_1 for CTE some_data: SELECT data FROM intermediate_result_pruning.range_partitioned
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) ANY (ARRAY['A'::text, 'E'::text])) AND (data OPERATOR(pg_catalog.=) ANY (SELECT some_data.data FROM (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)) some_data)))
DEBUG: CTE some_data is going to be inlined via distributed planning
DEBUG: generating subplan XXX_1 for subquery SELECT data FROM (SELECT range_partitioned.data FROM intermediate_result_pruning.range_partitioned) some_data
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) ANY (ARRAY['A'::text, 'E'::text])) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer))))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
count

@ -955,16 +954,16 @@ INSERT INTO stats (account_id, spent) VALUES ('foo', 100);
SELECT *
FROM
(
WITH accounts_cte AS (
WITH accounts_cte AS MATERIALIZED (
SELECT id AS account_id
FROM accounts
),
joined_stats_cte_1 AS (
joined_stats_cte_1 AS MATERIALIZED (
SELECT spent, account_id
FROM stats
INNER JOIN accounts_cte USING (account_id)
),
joined_stats_cte_2 AS (
joined_stats_cte_2 AS MATERIALIZED (
SELECT spent, account_id
FROM joined_stats_cte_1
INNER JOIN accounts_cte USING (account_id)

@ -992,16 +991,16 @@ SET citus.task_assignment_policy to 'round-robin';
SELECT *
FROM
(
WITH accounts_cte AS (
WITH accounts_cte AS MATERIALIZED (
SELECT id AS account_id
FROM accounts
),
joined_stats_cte_1 AS (
joined_stats_cte_1 AS MATERIALIZED (
SELECT spent, account_id
FROM stats
INNER JOIN accounts_cte USING (account_id)
),
joined_stats_cte_2 AS (
joined_stats_cte_2 AS MATERIALIZED (
SELECT spent, account_id
FROM joined_stats_cte_1
INNER JOIN accounts_cte USING (account_id)

@ -1027,10 +1026,10 @@ RESET citus.task_assignment_policy;
-- Insert..select is planned differently, make sure we have results everywhere.
-- We put the insert..select in a CTE here to prevent the CTE from being moved
-- into the select, which would follow the regular code path for select.
WITH stats AS (
WITH stats AS MATERIALIZED (
SELECT count(key) m FROM table_3
),
inserts AS (
inserts AS MATERIALIZED (
INSERT INTO table_2
SELECT key, count(*)
FROM table_1

@@ -1,13 +1,11 @@
SET citus.enable_repartition_joins to ON;
-- prevent PG 11 - PG 12 outputs to diverge
SET citus.enable_cte_inlining TO false;
SET citus.max_intermediate_result_size TO 2;
-- should fail because the copy size is ~4kB for each cte
WITH cte AS
WITH cte AS MATERIALIZED
(
SELECT * FROM users_table
),
cte2 AS (
cte2 AS MATERIALIZED (
SELECT * FROM events_table
)
SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10;
@@ -15,7 +13,7 @@ ERROR: the intermediate result size exceeds citus.max_intermediate_result_size
DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place.
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
SET citus.max_intermediate_result_size TO 9;
WITH cte AS
WITH cte AS MATERIALIZED
(
SELECT
users_table.user_id, users_table.value_1, users_table.value_2
@@ -26,7 +24,7 @@ WITH cte AS
on
(users_table.value_3=events_table.value_3)
),
cte2 AS (
cte2 AS MATERIALIZED(
SELECT * FROM events_table
)
SELECT
@@ -53,7 +51,7 @@ LIMIT 10;
-- router queries should be able to get limitted too
SET citus.max_intermediate_result_size TO 2;
-- this should pass, since we fetch small portions in each subplan
with cte as (select * from users_table where user_id=1),
with cte as MATERIALIZED (select * from users_table where user_id=1),
cte2 as (select * from users_table where user_id=2),
cte3 as (select * from users_table where user_id=3),
cte4 as (select * from users_table where user_id=4),
@@ -75,18 +73,18 @@ UNION
(1 row)

-- if we fetch the same amount of data at once, it should fail
WITH cte AS (SELECT * FROM users_table WHERE user_id IN (1,2,3,4,5))
WITH cte AS MATERIALIZED (SELECT * FROM users_table WHERE user_id IN (1,2,3,4,5))
SELECT * FROM cte ORDER BY 1,2,3,4,5 LIMIT 10;
ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 2 kB)
DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place.
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
SET citus.max_intermediate_result_size TO 0;
-- this should fail
WITH cte AS (SELECT * FROM users_table WHERE user_id=1),
cte2 AS (SELECT * FROM users_table WHERE user_id=2),
cte3 AS (SELECT * FROM users_table WHERE user_id=3),
cte4 AS (SELECT * FROM users_table WHERE user_id=4),
cte5 AS (SELECT * FROM users_table WHERE user_id=5)
WITH cte AS MATERIALIZED (SELECT * FROM users_table WHERE user_id=1),
cte2 AS MATERIALIZED (SELECT * FROM users_table WHERE user_id=2),
cte3 AS MATERIALIZED (SELECT * FROM users_table WHERE user_id=3),
cte4 AS MATERIALIZED (SELECT * FROM users_table WHERE user_id=4),
cte5 AS MATERIALIZED (SELECT * FROM users_table WHERE user_id=5)
SELECT * FROM (
(SELECT * FROM cte)
UNION
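For context on the hunks above and below: citus.max_intermediate_result_size caps how much intermediate-result data a single query may accumulate when its CTEs and subqueries are recursively planned, and the test repeatedly lowers and raises it to hit both sides of the limit. A short sketch of the knob itself (values are in kB; this mirrors the SET statements already in the test, nothing beyond them):

SET citus.max_intermediate_result_size TO 2;   -- error out once a query's intermediate results exceed 2 kB
SET citus.max_intermediate_result_size TO -1;  -- disable the check entirely, as the later hunks do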
@@ -104,11 +102,11 @@ HINT: To run the current query, set citus.max_intermediate_result_size to a hig
-- this fails since cte-subplan exceeds limit even if cte2 and cte3 don't
-- WHERE EXISTS forces materialization in pg12
SET citus.max_intermediate_result_size TO 4;
WITH cte AS (
WITH cte2 AS (
WITH cte AS MATERIALIZED (
WITH cte2 AS MATERIALIZED (
SELECT * FROM users_table
),
cte3 AS (
cte3 AS MATERIALIZED(
SELECT * FROM events_table
)
SELECT * FROM cte2, cte3 WHERE cte2.user_id = cte3.user_id AND cte2.user_id = 1
@@ -123,11 +121,11 @@ SELECT count(*) FROM cte WHERE EXISTS (select * from cte);
SET citus.max_intermediate_result_size TO 3;
-- this should fail since the cte-subplan exceeds the limit even if the
-- cte2 and cte3 does not
WITH cte AS (
WITH cte2 AS (
WITH cte AS MATERIALIZED (
WITH cte2 AS MATERIALIZED (
SELECT * FROM users_table WHERE user_id IN (3,4,5,6)
),
cte3 AS (
cte3 AS MATERIALIZED(
SELECT * FROM events_table WHERE event_type = 1
)
SELECT * FROM cte2, cte3 WHERE cte2.value_1 IN (SELECT value_2 FROM cte3)
@@ -138,16 +136,16 @@ DETAIL: Citus restricts the size of intermediate results of complex subqueries
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
-- this will fail in remote execution
SET citus.max_intermediate_result_size TO 2;
WITH cte AS (
WITH cte2 AS (
WITH cte AS MATERIALIZED (
WITH cte2 AS MATERIALIZED (
SELECT * FROM users_table WHERE user_id IN (1, 2)
),
cte3 AS (
cte3 AS MATERIALIZED (
SELECT * FROM users_table WHERE user_id = 3
)
SELECT * FROM cte2 UNION (SELECT * FROM cte3)
),
cte4 AS (
cte4 AS MATERIALIZED (
SELECT * FROM events_table
)
SELECT * FROM cte UNION ALL
@@ -157,16 +155,16 @@ DETAIL: Citus restricts the size of intermediate results of complex subqueries
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
SET citus.max_intermediate_result_size TO 1;
-- this will fail in router_executor
WITH cte AS (
WITH cte2 AS (
WITH cte AS MATERIALIZED (
WITH cte2 AS MATERIALIZED (
SELECT * FROM users_table WHERE user_id IN (1, 2)
),
cte3 AS (
cte3 AS MATERIALIZED (
SELECT * FROM users_table WHERE user_id = 3
)
SELECT * FROM cte2 UNION (SELECT * FROM cte3)
),
cte4 AS (
cte4 AS MATERIALIZED (
SELECT * FROM events_table
)
SELECT * FROM cte UNION ALL
@@ -174,19 +172,19 @@ SELECT * FROM cte4 ORDER BY 1,2,3,4,5 LIMIT 5;
ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 1 kB)
DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place.
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
-- Below that, all should pass since -1 disables the limit
-- Below that, all should pAS MATERIALIZEDs since -1 disables the limit
SET citus.max_intermediate_result_size TO -1;
-- real_time_executor + router_executor + real_time_executor will pass
WITH cte AS (
WITH cte2 AS (
WITH cte AS MATERIALIZED (
WITH cte2 AS MATERIALIZED (
SELECT * FROM users_table WHERE user_id IN (1, 2)
),
cte3 AS (
cte3 AS MATERIALIZED (
SELECT * FROM users_table WHERE user_id = 3
)
SELECT * FROM cte2 UNION (SELECT * FROM cte3)
),
cte4 AS (
cte4 AS MATERIALIZED (
SELECT * FROM events_table
)
SELECT * FROM cte UNION ALL
@@ -201,7 +199,7 @@ SELECT * FROM cte4 ORDER BY 1,2,3,4,5 LIMIT 5;
(5 rows)

-- regular adaptive executor CTE, should work since -1 disables the limit
WITH cte AS
WITH cte AS MATERIALIZED
(
SELECT
users_table.user_id, users_table.value_1, users_table.value_2
@@ -212,7 +210,7 @@ WITH cte AS
on
(users_table.value_2=events_table.value_2)
),
cte2 AS (
cte2 AS MATERIALIZED (
SELECT * FROM events_table
)
SELECT
@@ -237,11 +235,11 @@ LIMIT 10;
(10 rows)

-- regular real-time CTE fetches around ~4kb data in each subplan
WITH cte AS
WITH cte AS MATERIALIZED
(
SELECT * FROM users_table
),
cte2 AS (
cte2 AS MATERIALIZED (
SELECT * FROM events_table
)
SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10;
@@ -260,7 +258,7 @@ SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10;
(10 rows)

-- regular real-time query fetches ~4kB
WITH cte AS
WITH cte AS MATERIALIZED
(
SELECT * FROM users_table WHERE user_id IN (1,2,3,4,5)
)
@@ -280,11 +278,11 @@ SELECT * FROM cte ORDER BY 1,2,3,4,5 LIMIT 10;
(10 rows)

-- nested CTEs
WITH cte AS (
WITH cte2 AS (
WITH cte AS MATERIALIZED (
WITH cte2 AS MATERIALIZED (
SELECT * FROM users_table
),
cte3 AS (
cte3 AS MATERIALIZED (
SELECT * FROM events_table
)
SELECT
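The nested-CTE cases above work because each materialized CTE is recursively planned into its own subplan, innermost first, and every subplan's result counts against the size limit as a separate intermediate result. A compact sketch of the shape being exercised, assuming a hypothetical table (the tests themselves use users_table and events_table):

WITH cte AS MATERIALIZED (
  WITH cte2 AS MATERIALIZED (
    SELECT * FROM some_dist_table  -- hypothetical table name
  )
  SELECT * FROM cte2
)
SELECT count(*) FROM cte;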
@@ -34,14 +34,12 @@ INSERT INTO table_2 VALUES (3, '3'), (4, '4'), (5, '5'),
INSERT INTO ref_table VALUES (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6');
NOTICE: executing the command locally: INSERT INTO locally_execute_intermediate_results.ref_table_1580008 AS citus_table_alias (key, value) VALUES (1,'1'::text), (2,'2'::text), (3,'3'::text), (4,'4'::text), (5,'5'::text), (6,'6'::text)
INSERT INTO local_table VALUES (3, '3'), (4, '4'), (5, '5'), (6, '6');
-- prevent PG 11 - PG 12 outputs to diverge
-- and have a lot more CTEs recursively planned for the
-- sake of increasing the test coverage
SET citus.enable_cte_inlining TO false;
SET client_min_messages TO DEBUG1;
-- the query cannot be executed locally, but still because of
-- HAVING the intermediate result is written to local file as well
WITH cte_1 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
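The DEBUG and NOTICE lines threaded through these outputs come from logging settings the test switches on up front (they appear verbatim in a later hunk); assuming a psql session on the coordinator, the relevant combination is:

SET citus.log_intermediate_results TO true;  -- log where each subplan's result is written or sent
SET citus.log_local_commands TO true;        -- echo commands that run via local execution
SET client_min_messages TO DEBUG1;           -- surface the 'generating subplan' planner messages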
@@ -58,7 +56,7 @@ DEBUG: Subplan XXX_1 will be written to local file
(2 rows)

-- in this case, the HAVING Is also pushed down
WITH cte_1 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -76,8 +74,8 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
(2 rows)

-- subquery in the WHERE part of the query can be executed locally
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -104,8 +102,8 @@ NOTICE: executing the command locally: SELECT key FROM (SELECT intermediate_res

-- subquery in the WHERE part of the query should not be executed locally
-- because it can be pushed down with the jointree
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(key) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(key) FROM table_2)
SELECT
count(*)
FROM
@@ -126,9 +124,9 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
(0 rows)

-- now all the intermediate results are safe to be in local files
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(key) FROM table_2),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(key) FROM table_2),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -151,8 +149,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in

-- multiple CTEs are joined inside HAVING, so written to file
-- locally, but nothing executed locally
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -171,8 +169,8 @@ DEBUG: Subplan XXX_2 will be written to local file
(2 rows)

-- same as above, but HAVING pushed down to workers
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -195,9 +193,9 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
-- multiple CTEs are joined inside HAVING, so written to file
-- locally, also the join tree contains only another CTE, so should be
-- executed locally
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -221,7 +219,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in
-- now, the CTE is going to be written locally,
-- plus that is going to be read locally because
-- of the aggragate over the cte in HAVING
WITH cte_1 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -241,7 +239,7 @@ NOTICE: executing the command locally: SELECT max(max) AS max FROM (SELECT inte
(2 rows)

-- same as above, but with HAVING pushed down
WITH cte_1 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -262,8 +260,8 @@ NOTICE: executing the command locally: SELECT max(max) AS max FROM (SELECT inte
(2 rows)

-- two ctes are going to be written locally and executed locally
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT * FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM table_1)
SELECT
count(*)
FROM
@@ -288,7 +286,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in

-- this time the same CTE is both joined with a distributed
-- table and used in HAVING
WITH a AS (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
WITH a AS MATERIALIZED (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
SELECT count(*),
key
FROM a JOIN table_2 USING (key)
@@ -307,7 +305,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-- this time the same CTE is both joined with a distributed
-- table and used in HAVING -- but used in another subquery/aggregate
-- so one more level of recursive planning
WITH a AS (SELECT * FROM table_1)
WITH a AS MATERIALIZED (SELECT * FROM table_1)
SELECT count(*),
key
FROM a JOIN table_2 USING (key)
@@ -327,13 +325,13 @@ NOTICE: executing the command locally: SELECT max(value) AS max FROM (SELECT in
(1 row)

-- same query as the above, without the aggragate
WITH a AS (SELECT max(key) as key, max(value) as value FROM ref_table)
WITH a AS MATERIALIZED (SELECT max(key) as key, max(value) as value FROM ref_table)
SELECT count(*),
key
FROM a JOIN ref_table USING (key)
GROUP BY key
HAVING (max(ref_table.value) <= (SELECT value FROM a));
NOTICE: executing the command locally: WITH a AS (SELECT max(ref_table_1.key) AS key, max(ref_table_1.value) AS value FROM locally_execute_intermediate_results.ref_table_1580008 ref_table_1) SELECT count(*) AS count, a.key FROM (a JOIN locally_execute_intermediate_results.ref_table_1580008 ref_table(key, value) USING (key)) GROUP BY a.key HAVING (max(ref_table.value) OPERATOR(pg_catalog.<=) (SELECT a_1.value FROM a a_1))
NOTICE: executing the command locally: WITH a AS MATERIALIZED (SELECT max(ref_table_1.key) AS key, max(ref_table_1.value) AS value FROM locally_execute_intermediate_results.ref_table_1580008 ref_table_1) SELECT count(*) AS count, a.key FROM (a JOIN locally_execute_intermediate_results.ref_table_1580008 ref_table(key, value) USING (key)) GROUP BY a.key HAVING (max(ref_table.value) OPERATOR(pg_catalog.<=) (SELECT a_1.value FROM a a_1))
count | key
---------------------------------------------------------------------
1 | 6
@@ -341,9 +339,9 @@ NOTICE: executing the command locally: WITH a AS (SELECT max(ref_table_1.key) A

-- some edge cases around CTEs used inside other CTEs
-- everything can be executed locally
WITH cte_1 as (SELECT * FROM table_1),
cte_2 AS (SELECT * FROM cte_1),
cte_3 AS (SELECT max(key) FROM cte_2)
WITH cte_1 as MATERIALIZED (SELECT * FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
cte_3 AS MATERIALIZED (SELECT max(key) FROM cte_2)
SELECT * FROM cte_3;
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
@@ -361,9 +359,9 @@ NOTICE: executing the command locally: SELECT max FROM (SELECT intermediate_res
(1 row)

-- the join between cte_3 and table_2 has to happen remotely
WITH cte_1 as (SELECT * FROM table_1),
cte_2 AS (SELECT * FROM cte_1),
cte_3 AS (SELECT max(key) as key FROM cte_2)
WITH cte_1 as MATERIALIZED (SELECT * FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
cte_3 AS MATERIALIZED (SELECT max(key) as key FROM cte_2)
SELECT * FROM cte_3 JOIN table_2 USING (key) WHERE table_2.key = 1;
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
@@ -379,9 +377,9 @@ NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT inte
(0 rows)

-- the join between cte_3 and table_2 has to happen remotely
WITH cte_1 as (SELECT * FROM table_1),
cte_2 AS (SELECT * FROM cte_1),
cte_3 AS (SELECT max(key) as key FROM cte_2)
WITH cte_1 as MATERIALIZED (SELECT * FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
cte_3 AS MATERIALIZED (SELECT max(key) as key FROM cte_2)
SELECT * FROM cte_3 JOIN ref_table USING (key);
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
@@ -402,9 +400,9 @@ NOTICE: executing the command locally: SELECT cte_3.key, ref_table.value FROM (

-- some cases around router queries
-- a router query, but the having has two cte joins
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2
WHERE KEY = 3
@@ -422,9 +420,9 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx

-- a router query, but the having has two cte joins
-- and the jointree has a join with another cte
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2 JOIN cte_3 USING(key)
WHERE KEY = 3
@@ -444,9 +442,9 @@ DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx

-- a router query, but the having has two cte joins
-- and the jointree has a join with the same CTEs
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2 JOIN cte_3 USING(key) JOIN cte_2 ON (key = MAX::int) JOIN cte_1 USING(MAX)
WHERE KEY = 3
@@ -465,8 +463,8 @@ DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
(0 rows)

-- subPlans needed remotely as the subquery is pushed down
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT key FROM table_1 GROUP BY key HAVING max(value) > (SELECT * FROM cte_1)) as foo,
(SELECT key FROM table_2 GROUP BY key HAVING max(value) > (SELECT * FROM cte_2)) as bar
@@ -484,8 +482,8 @@ DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx

-- the second subquery needs to be recursively planned due to non-colocated subquery join
-- so cte_2 becomes part of master query of that recursive subquery planning
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT value AS key FROM table_1 GROUP BY value HAVING max(value) > (SELECT * FROM cte_1)) as foo,
(SELECT value AS key FROM table_2 GROUP BY value HAVING max(value) > (SELECT * FROM cte_2)) as bar
@@ -505,8 +503,8 @@ NOTICE: executing the command locally: SELECT foo.key, bar.key FROM (SELECT int
(0 rows)

-- similar to above, but having pushed down
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT key FROM table_1 GROUP BY key HAVING max(value) > (SELECT * FROM cte_1)) as foo,
(SELECT key FROM table_2 GROUP BY key HAVING max(value) > (SELECT * FROM cte_2)) as bar
@@ -526,8 +524,8 @@ DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
(0 rows)

-- now, forcing all subqueries to be on the local node
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT value AS key FROM table_1 GROUP BY value HAVING max(value) > (SELECT * FROM cte_1) LIMIT 1) as foo,
(SELECT value AS key FROM table_2 GROUP BY value HAVING max(value) > (SELECT * FROM cte_2) LIMIT 1) as bar
@@ -547,7 +545,7 @@ NOTICE: executing the command locally: SELECT foo.key, bar.key FROM (SELECT int
(0 rows)

-- queries in which the last step has only CTEs can use local tables
WITH cte_1 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -564,8 +562,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM locally_ex
1
(2 rows)

WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -595,13 +593,9 @@ SET search_path TO locally_execute_intermediate_results;
SET citus.log_intermediate_results TO TRUE;
SET citus.log_local_commands TO TRUE;
SET client_min_messages TO DEBUG1;
-- prevent PG 11 - PG 12 outputs to diverge
-- and have a lot more CTEs recursively planned for the
-- sake of increasing the test coverage
SET citus.enable_cte_inlining TO false;
-- the query cannot be executed locally, but still because of
-- HAVING the intermediate result is written to local file as well
WITH cte_1 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -623,8 +617,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_

-- On non-mx case the subquery in the WHERE part of the query can be executed locally
-- however, on Citus MX we have this limitation where the query cannot be executed locally
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -656,8 +650,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_

-- subquery in the WHERE part of the query should not be executed locally
-- because it can be pushed down with the jointree
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(key) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(key) FROM table_2)
SELECT
count(*)
FROM
@@ -685,9 +679,9 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_

-- although all the intermediate results are safe to be in local files
-- we currently do not support it on Citus MX
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(key) FROM table_2),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(key) FROM table_2),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -716,8 +710,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in

-- multiple CTEs are joined inside HAVING, so written to file
-- locally, but nothing executed locally
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -744,9 +738,9 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_
-- multiple CTEs are joined inside HAVING, so written to file
-- locally, also the join tree contains only another CTE, so should be
-- executed locally, but not on an Citus MX worker
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -777,7 +771,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in
-- plus that could have been read locally on the coordinator
-- because of the aggragate over the cte in HAVING
-- but not on Citus MX
WITH cte_1 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -802,8 +796,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_

-- two could have been written locally and executed locally
-- on the coordinator, but not on the workers
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT * FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM table_1)
SELECT
count(*)
FROM
@@ -832,7 +826,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in

-- this time the same CTE is both joined with a distributed
-- table and used in HAVING
WITH a AS (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
WITH a AS MATERIALIZED (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
SELECT count(*),
key
FROM a JOIN table_2 USING (key)
@@ -854,7 +848,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_
-- this time the same CTE is both joined with a distributed
-- table and used in HAVING -- but used in another subquery/aggregate
-- so one more level of recursive planning
WITH a AS (SELECT * FROM table_1)
WITH a AS MATERIALIZED (SELECT * FROM table_1)
SELECT count(*),
key
FROM a JOIN table_2 USING (key)
@@ -877,13 +871,13 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_
(1 row)

-- same query as the above, without the aggragate
WITH a AS (SELECT max(key) as key, max(value) as value FROM ref_table)
WITH a AS MATERIALIZED (SELECT max(key) as key, max(value) as value FROM ref_table)
SELECT count(*),
key
FROM a JOIN ref_table USING (key)
GROUP BY key
HAVING (max(ref_table.value) <= (SELECT value FROM a));
NOTICE: executing the command locally: WITH a AS (SELECT max(ref_table_1.key) AS key, max(ref_table_1.value) AS value FROM locally_execute_intermediate_results.ref_table_1580008 ref_table_1) SELECT count(*) AS count, a.key FROM (a JOIN locally_execute_intermediate_results.ref_table_1580008 ref_table(key, value) USING (key)) GROUP BY a.key HAVING (max(ref_table.value) OPERATOR(pg_catalog.<=) (SELECT a_1.value FROM a a_1))
NOTICE: executing the command locally: WITH a AS MATERIALIZED (SELECT max(ref_table_1.key) AS key, max(ref_table_1.value) AS value FROM locally_execute_intermediate_results.ref_table_1580008 ref_table_1) SELECT count(*) AS count, a.key FROM (a JOIN locally_execute_intermediate_results.ref_table_1580008 ref_table(key, value) USING (key)) GROUP BY a.key HAVING (max(ref_table.value) OPERATOR(pg_catalog.<=) (SELECT a_1.value FROM a a_1))
count | key
---------------------------------------------------------------------
1 | 6
@@ -892,9 +886,9 @@ NOTICE: executing the command locally: WITH a AS (SELECT max(ref_table_1.key) A
-- some edge cases around CTEs used inside other CTEs
-- everything could be executed locally on the coordinator,
-- but not on the worker
WITH cte_1 as (SELECT * FROM table_1),
cte_2 AS (SELECT * FROM cte_1),
cte_3 AS (SELECT max(key) FROM cte_2)
WITH cte_1 as MATERIALIZED (SELECT * FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
cte_3 AS MATERIALIZED (SELECT max(key) FROM cte_2)
SELECT * FROM cte_3;
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
@@ -916,9 +910,9 @@ NOTICE: executing the command locally: SELECT max FROM (SELECT intermediate_res
-- the join between cte_3 and table_2 has to could have happened
-- locally since the key = 1 resides on this node
-- but because of the current implementation limitations we can't
WITH cte_1 as (SELECT * FROM table_1),
cte_2 AS (SELECT * FROM cte_1),
cte_3 AS (SELECT max(key) as key FROM cte_2)
WITH cte_1 as MATERIALIZED (SELECT * FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
cte_3 AS MATERIALIZED (SELECT max(key) as key FROM cte_2)
SELECT * FROM cte_3 JOIN table_2 USING (key) WHERE table_2.key = 1;
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
@@ -938,9 +932,9 @@ NOTICE: executing the command locally: SELECT cte_3.key, table_2.value FROM ((S

-- the join between cte_3 and table_2 has to cannot happen
-- locally because the key = 2 resides on a remote node
WITH cte_1 as (SELECT * FROM table_1),
cte_2 AS (SELECT * FROM cte_1),
cte_3 AS (SELECT max(key) as key FROM cte_2)
WITH cte_1 as MATERIALIZED (SELECT * FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
cte_3 AS MATERIALIZED (SELECT max(key) as key FROM cte_2)
SELECT * FROM cte_3 JOIN table_2 USING (key) WHERE table_2.key = 2;
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
@@ -959,9 +953,9 @@ NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT inte

-- the join between cte_3 and ref can could have happened locally
-- but because of the current implementation limitations we can't
WITH cte_1 as (SELECT * FROM table_1),
cte_2 AS (SELECT * FROM cte_1),
cte_3 AS (SELECT max(key) as key FROM cte_2)
WITH cte_1 as MATERIALIZED (SELECT * FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
cte_3 AS MATERIALIZED (SELECT max(key) as key FROM cte_2)
SELECT * FROM cte_3 JOIN ref_table USING (key);
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
@@ -984,9 +978,9 @@ NOTICE: executing the command locally: SELECT cte_3.key, ref_table.value FROM (

-- some cases around router queries
-- a router query, but the having has two cte joins
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2
WHERE KEY = 3
@@ -1008,9 +1002,9 @@ NOTICE: executing the command locally: SELECT max(value) AS max FROM locally_ex

-- a router query, but the having has two cte joins
-- and the jointree has a join with another cte
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2 JOIN cte_3 USING(key)
WHERE KEY = 3
@@ -1036,9 +1030,9 @@ NOTICE: executing the command locally: SELECT key, value FROM locally_execute_i

-- a router query, but the having has two cte joins
-- and the jointree has a join with the same CTEs
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2 JOIN cte_3 USING(key) JOIN cte_2 ON (key = MAX::int) JOIN cte_1 USING(MAX)
WHERE KEY = 3
@@ -1062,8 +1056,8 @@ NOTICE: executing the command locally: SELECT key, value FROM locally_execute_i
(0 rows)

-- subPlans needed remotely as the subquery is pushed down
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT key FROM table_1 GROUP BY key HAVING max(value) > (SELECT * FROM cte_1)) as foo,
(SELECT key FROM table_2 GROUP BY key HAVING max(value) > (SELECT * FROM cte_2)) as bar
@@ -1087,8 +1081,8 @@ NOTICE: executing the command locally: SELECT worker_column_1 AS key, worker_co

-- the second subquery needs to be recursively planned due to non-colocated subquery join
-- so cte_2 becomes part of master query of that recursive subquery planning
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT value AS key FROM table_1 GROUP BY value HAVING max(value) > (SELECT * FROM cte_1)) as foo,
(SELECT value AS key FROM table_2 GROUP BY value HAVING max(value) > (SELECT * FROM cte_2)) as bar
@@ -1116,8 +1110,8 @@ NOTICE: executing the command locally: SELECT foo.key, bar.key FROM (SELECT int
(0 rows)

-- now, forcing all subqueries to be on the local node
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT value AS key FROM table_1 GROUP BY value HAVING max(value) > (SELECT * FROM cte_1) LIMIT 1) as foo,
(SELECT value AS key FROM table_2 GROUP BY value HAVING max(value) > (SELECT * FROM cte_2) LIMIT 1) as bar
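The following hunks rerun the same queries after switching the task-assignment policy, which rotates router-query placements across nodes and therefore changes which node each intermediate result has to reach; the toggle is just the GUC shown in the next hunk's context:

SET citus.task_assignment_policy TO 'round-robin';
-- and back to the default afterwards:
RESET citus.task_assignment_policy;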
@@ -1148,7 +1142,7 @@ NOTICE: executing the command locally: SELECT foo.key, bar.key FROM (SELECT int
set citus.task_assignment_policy TO "round-robin" ;
-- the query cannot be executed locally, but still because of
-- HAVING the intermediate result is written to local file as well
WITH cte_1 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -1170,8 +1164,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_

-- On non-mx case the subquery in the WHERE part of the query can be executed locally
-- however, on Citus MX we have this limitation where the query cannot be executed locally
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -1203,8 +1197,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_

-- subquery in the WHERE part of the query should not be executed locally
-- because it can be pushed down with the jointree
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(key) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(key) FROM table_2)
SELECT
count(*)
FROM
@@ -1232,9 +1226,9 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_

-- although all the intermediate results are safe to be in local files
-- we currently do not support it on Citus MX
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(key) FROM table_2),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(key) FROM table_2),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -1263,8 +1257,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in

-- multiple CTEs are joined inside HAVING, so written to file
-- locally, but nothing executed locally
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -1293,9 +1287,9 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_
-- multiple CTEs are joined inside HAVING, so written to file
-- locally, also the join tree contains only another CTE, so should be
-- executed locally, but not on an Citus MX worker
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT
count(*)
FROM
@@ -1325,7 +1319,7 @@ NOTICE: executing the command locally: SELECT key, value FROM locally_execute_i
-- plus that could have been read locally on the coordinator
-- because of the aggragate over the cte in HAVING
-- but not on Citus MX
WITH cte_1 AS (SELECT max(value) FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1)
SELECT
count(*)
FROM
@@ -1350,8 +1344,8 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_

-- two could have been written locally and executed locally
-- on the coordinator, but not on the workers
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT * FROM table_1)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT * FROM table_1)
SELECT
count(*)
FROM
@@ -1379,7 +1373,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in

-- this time the same CTE is both joined with a distributed
-- table and used in HAVING
WITH a AS (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
WITH a AS MATERIALIZED (SELECT * FROM table_1 ORDER BY 1,2 DESC LIMIT 1)
SELECT count(*),
key
FROM a JOIN table_2 USING (key)
@@ -1401,7 +1395,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_
-- this time the same CTE is both joined with a distributed
-- table and used in HAVING -- but used in another subquery/aggregate
-- so one more level of recursive planning
WITH a AS (SELECT * FROM table_1)
WITH a AS MATERIALIZED (SELECT * FROM table_1)
SELECT count(*),
key
FROM a JOIN table_2 USING (key)
@@ -1423,13 +1417,13 @@ NOTICE: executing the command locally: SELECT count(*) AS count, worker_column_
(1 row)

-- same query as the above, without the aggragate
WITH a AS (SELECT max(key) as key, max(value) as value FROM ref_table)
WITH a AS MATERIALIZED (SELECT max(key) as key, max(value) as value FROM ref_table)
SELECT count(*),
key
FROM a JOIN ref_table USING (key)
GROUP BY key
HAVING (max(ref_table.value) <= (SELECT value FROM a));
NOTICE: executing the command locally: WITH a AS (SELECT max(ref_table_1.key) AS key, max(ref_table_1.value) AS value FROM locally_execute_intermediate_results.ref_table_1580008 ref_table_1) SELECT count(*) AS count, a.key FROM (a JOIN locally_execute_intermediate_results.ref_table_1580008 ref_table(key, value) USING (key)) GROUP BY a.key HAVING (max(ref_table.value) OPERATOR(pg_catalog.<=) (SELECT a_1.value FROM a a_1))
NOTICE: executing the command locally: WITH a AS MATERIALIZED (SELECT max(ref_table_1.key) AS key, max(ref_table_1.value) AS value FROM locally_execute_intermediate_results.ref_table_1580008 ref_table_1) SELECT count(*) AS count, a.key FROM (a JOIN locally_execute_intermediate_results.ref_table_1580008 ref_table(key, value) USING (key)) GROUP BY a.key HAVING (max(ref_table.value) OPERATOR(pg_catalog.<=) (SELECT a_1.value FROM a a_1))
count | key
---------------------------------------------------------------------
1 | 6
@@ -1439,20 +1433,18 @@ NOTICE: executing the command locally: WITH a AS (SELECT max(ref_table_1.key) A
-- everything could be executed locally on the coordinator,
-- but not on the worker
WITH cte_1 as (SELECT * FROM table_1),
cte_2 AS (SELECT * FROM cte_1),
cte_3 AS (SELECT max(key) FROM cte_2)
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
cte_3 AS MATERIALIZED (SELECT max(key) FROM cte_2)
SELECT * FROM cte_3;
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS max FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer)) cte_3
DEBUG: CTE cte_1 is going to be inlined via distributed planning
DEBUG: generating subplan XXX_1 for CTE cte_2: SELECT key, value FROM (SELECT table_1.key, table_1.value FROM locally_execute_intermediate_results.table_1) cte_1
DEBUG: generating subplan XXX_2 for CTE cte_3: SELECT max(key) AS max FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(max integer)) cte_3
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
NOTICE: executing the command locally: SELECT key, value FROM locally_execute_intermediate_results.table_1_1580000 table_1 WHERE true
NOTICE: executing the command locally: SELECT key, value FROM locally_execute_intermediate_results.table_1_1580002 table_1 WHERE true
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
NOTICE: executing the command locally: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
NOTICE: executing the command locally: SELECT max FROM (SELECT intermediate_result.max FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(max integer)) cte_3
NOTICE: executing the command locally: SELECT max(key) AS max FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
max
---------------------------------------------------------------------
4
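The replaced DEBUG output above captures the planner behavior these expected files now encode: cte_1, referenced once and side-effect free, is inlined via distributed planning instead of getting its own subplan, so the subplan numbering shrinks from XXX_1..XXX_3 to XXX_1..XXX_2. PostgreSQL 12 also offers the inverse knob; a hedged sketch against a hypothetical dist_table not used by these tests:

-- force inlining even for a CTE referenced more than once (PG 12+ syntax)
WITH cte AS NOT MATERIALIZED (SELECT key, value FROM dist_table)
SELECT a.key FROM cte a JOIN cte b USING (key);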
@ -1462,20 +1454,19 @@ NOTICE: executing the command locally: SELECT max FROM (SELECT intermediate_res
|
|||
-- locally since the key = 1 resides on this node
|
||||
-- but because of the current implementation limitations we can't
|
||||
WITH cte_1 as (SELECT * FROM table_1),
|
||||
cte_2 AS (SELECT * FROM cte_1),
|
||||
cte_3 AS (SELECT max(key) as key FROM cte_2)
|
||||
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
|
||||
cte_3 AS MATERIALIZED (SELECT max(key) as key FROM cte_2)
|
||||
SELECT * FROM cte_3 JOIN table_2 USING (key) WHERE table_2.key = 1;
|
||||
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
|
||||
DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
|
||||
DEBUG: CTE cte_1 is going to be inlined via distributed planning
|
||||
DEBUG: generating subplan XXX_1 for CTE cte_2: SELECT key, value FROM (SELECT table_1.key, table_1.value FROM locally_execute_intermediate_results.table_1) cte_1
|
||||
DEBUG: generating subplan XXX_2 for CTE cte_3: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
NOTICE: executing the command locally: SELECT key, value FROM locally_execute_intermediate_results.table_1_1580000 table_1 WHERE true
|
||||
NOTICE: executing the command locally: SELECT key, value FROM locally_execute_intermediate_results.table_1_1580002 table_1 WHERE true
|
||||
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
|
||||
NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
|
||||
NOTICE: executing the command locally: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2_1580004 table_2(key, value) USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
|
||||
NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
|
||||
NOTICE: executing the command locally: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2_1580004 table_2(key, value) USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
@ -1483,19 +1474,17 @@ NOTICE: executing the command locally: SELECT cte_3.key, table_2.value FROM ((S
|
|||
-- the join between cte_3 and table_2 has to cannot happen
|
||||
-- locally because the key = 2 resides on a remote node
|
||||
WITH cte_1 as (SELECT * FROM table_1),
|
||||
cte_2 AS (SELECT * FROM cte_1),
|
||||
cte_3 AS (SELECT max(key) as key FROM cte_2)
|
||||
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
|
||||
cte_3 AS MATERIALIZED (SELECT max(key) as key FROM cte_2)
|
||||
SELECT * FROM cte_3 JOIN table_2 USING (key) WHERE table_2.key = 2;
|
||||
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
|
||||
DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 2)
|
||||
DEBUG: CTE cte_1 is going to be inlined via distributed planning
|
||||
DEBUG: generating subplan XXX_1 for CTE cte_2: SELECT key, value FROM (SELECT table_1.key, table_1.value FROM locally_execute_intermediate_results.table_1) cte_1
|
||||
DEBUG: generating subplan XXX_2 for CTE cte_3: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, table_2.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 2)
|
||||
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
|
||||
NOTICE: executing the command locally: SELECT key, value FROM locally_execute_intermediate_results.table_1_1580000 table_1 WHERE true
|
||||
NOTICE: executing the command locally: SELECT key, value FROM locally_execute_intermediate_results.table_1_1580002 table_1 WHERE true
|
||||
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
|
||||
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
|
||||
NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
@ -1503,21 +1492,20 @@ NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT inte
-- the join between cte_3 and ref can could have happened locally
-- but because of the current implementation limitations we can't
WITH cte_1 as (SELECT * FROM table_1),
cte_2 AS (SELECT * FROM cte_1),
cte_3 AS (SELECT max(key) as key FROM cte_2)
cte_2 AS MATERIALIZED (SELECT * FROM cte_1),
cte_3 AS MATERIALIZED (SELECT max(key) as key FROM cte_2)
SELECT * FROM cte_3 JOIN ref_table USING (key);
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT key, value FROM locally_execute_intermediate_results.table_1
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_1
DEBUG: generating subplan XXX_3 for CTE cte_3: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.ref_table USING (key))
DEBUG: CTE cte_1 is going to be inlined via distributed planning
DEBUG: generating subplan XXX_1 for CTE cte_2: SELECT key, value FROM (SELECT table_1.key, table_1.value FROM locally_execute_intermediate_results.table_1) cte_1
DEBUG: generating subplan XXX_2 for CTE cte_3: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT cte_3.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.ref_table USING (key))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
NOTICE: executing the command locally: SELECT key, value FROM locally_execute_intermediate_results.table_1_1580000 table_1 WHERE true
NOTICE: executing the command locally: SELECT key, value FROM locally_execute_intermediate_results.table_1_1580002 table_1 WHERE true
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
NOTICE: executing the command locally: SELECT cte_3.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.ref_table_1580008 ref_table(key, value) USING (key))
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
NOTICE: executing the command locally: SELECT max(key) AS key FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) cte_2
NOTICE: executing the command locally: SELECT cte_3.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_3 JOIN locally_execute_intermediate_results.ref_table_1580008 ref_table(key, value) USING (key))
key | value
---------------------------------------------------------------------
4 | 4

@@ -1525,9 +1513,9 @@ NOTICE: executing the command locally: SELECT cte_3.key, ref_table.value FROM (

-- some cases around router queries
-- a router query, but the having has two cte joins
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2
WHERE KEY = 3

@@ -1549,9 +1537,9 @@ NOTICE: executing the command locally: SELECT max(value) AS max FROM locally_ex

-- a router query, but the having has two cte joins
-- and the jointree has a join with another cte
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2 JOIN cte_3 USING(key)
WHERE KEY = 3

@@ -1576,9 +1564,9 @@ NOTICE: executing the command locally: SELECT key, value FROM locally_execute_i
(0 rows)

-- the same query as above, try to hit local node with either of the queries
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2 JOIN cte_3 USING(key)
WHERE KEY = 3

@@ -1604,9 +1592,9 @@ NOTICE: executing the command locally: SELECT key, value FROM locally_execute_i

-- a router query, but the having has two cte joins
-- and the jointree has a join with the same CTEs
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_1),
cte_3 AS (SELECT * FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_3 AS MATERIALIZED (SELECT * FROM table_2)
SELECT count(*)
FROM table_2 JOIN cte_3 USING(key) JOIN cte_2 ON (key = MAX::int) JOIN cte_1 USING(MAX)
WHERE KEY = 3

@@ -1631,8 +1619,8 @@ NOTICE: executing the command locally: SELECT key, value FROM locally_execute_i
(0 rows)

- subPlans needed remotely as the subquery is pushed down
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT key FROM table_1 GROUP BY key HAVING max(value) > (SELECT * FROM cte_1)) as foo,
(SELECT key FROM table_2 GROUP BY key HAVING max(value) > (SELECT * FROM cte_2)) as bar

@@ -1640,8 +1628,8 @@ SELECT * FROM
ERROR: syntax error at or near "-"
-- the second subquery needs to be recursively planned due to non-colocated subquery join
-- so cte_2 becomes part of master query of that recursive subquery planning
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT value AS key FROM table_1 GROUP BY value HAVING max(value) > (SELECT * FROM cte_1)) as foo,
(SELECT value AS key FROM table_2 GROUP BY value HAVING max(value) > (SELECT * FROM cte_2)) as bar

@@ -1668,8 +1656,8 @@ NOTICE: executing the command locally: SELECT worker_column_1 AS key, max(worke
(0 rows)

-- now, forcing all subqueries to be on the local node
WITH cte_1 AS (SELECT max(value) FROM table_1),
cte_2 AS (SELECT max(value) FROM table_2)
WITH cte_1 AS MATERIALIZED (SELECT max(value) FROM table_1),
cte_2 AS MATERIALIZED (SELECT max(value) FROM table_2)
SELECT * FROM
(SELECT value AS key FROM table_1 GROUP BY value HAVING max(value) > (SELECT * FROM cte_1) LIMIT 1) as foo,
(SELECT value AS key FROM table_2 GROUP BY value HAVING max(value) > (SELECT * FROM cte_2) LIMIT 1) as bar

@@ -14,7 +14,6 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);

RESET client_min_messages;
-- make results consistent
SET citus.enable_cte_inlining TO OFF;
-- create test tables
CREATE TABLE postgres_local_table (a int);
CREATE TABLE partitioned_postgres_local_table(a int) PARTITION BY RANGE(a);

@@ -470,7 +469,7 @@ SELECT COUNT(*) FROM unlogged_distributed_table u1 JOIN partitioned_distributed_

RESET citus.enable_repartition_joins;
-- joins with cte's
WITH cte_1 AS (SELECT * FROM partitioned_distributed_table)
WITH cte_1 AS MATERIALIZED (SELECT * FROM partitioned_distributed_table)
SELECT COUNT(*) FROM cte_1;
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT a, b FROM mixed_relkind_tests.partitioned_distributed_table
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1

@@ -479,7 +478,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
78
(1 row)

WITH cte_1 AS (SELECT * FROM partitioned_distributed_table)
WITH cte_1 AS MATERIALIZED (SELECT * FROM partitioned_distributed_table)
SELECT COUNT(*) FROM cte_1 JOIN partitioned_distributed_table USING (a);
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT a, b FROM mixed_relkind_tests.partitioned_distributed_table
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 JOIN mixed_relkind_tests.partitioned_distributed_table USING (a))

@@ -488,7 +487,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
1014
(1 row)

WITH cte_1 AS (SELECT * FROM foreign_distributed_table)
WITH cte_1 AS MATERIALIZED (SELECT * FROM foreign_distributed_table)
SELECT COUNT(*) FROM cte_1 JOIN foreign_distributed_table USING (a);
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT a, b FROM mixed_relkind_tests.foreign_distributed_table
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 JOIN mixed_relkind_tests.foreign_distributed_table USING (a))

@@ -497,7 +496,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
0
(1 row)

WITH cte_1 AS (SELECT * FROM partitioned_distributed_table)
WITH cte_1 AS MATERIALIZED (SELECT * FROM partitioned_distributed_table)
SELECT COUNT(*) FROM cte_1 JOIN partitioned_distributed_table USING (b);
DEBUG: generating subplan XXX_1 for CTE cte_1: SELECT a, b FROM mixed_relkind_tests.partitioned_distributed_table
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 JOIN mixed_relkind_tests.partitioned_distributed_table USING (b))

@@ -518,8 +517,8 @@ FROM (SELECT * FROM unlogged_distributed_table WHERE b = 1) AS sub1,
(SELECT * FROM unlogged_distributed_table WHERE b = 2) AS sub2
WHERE sub1.a = sub2.a AND sub1.a = dt.a AND dt.a > 1;
-- multi shard non-colocated update
WITH cte1 AS (SELECT * FROM partitioned_distributed_table WHERE b = 1),
cte2 AS (SELECT * FROM partitioned_distributed_table WHERE b = 2)
WITH cte1 AS MATERIALIZED (SELECT * FROM partitioned_distributed_table WHERE b = 1),
cte2 AS MATERIALIZED (SELECT * FROM partitioned_distributed_table WHERE b = 2)
UPDATE partitioned_distributed_table dt SET b = cte1.a + cte2.a
FROM cte1, cte2 WHERE cte1.a != cte2.a AND cte1.a = dt.a AND dt.a > 1;
DEBUG: generating subplan XXX_1 for CTE cte1: SELECT a, b FROM mixed_relkind_tests.partitioned_distributed_table WHERE (b OPERATOR(pg_catalog.=) 1)

@@ -1,555 +0,0 @@
--
-- MULTI_COMPLEX_EXPRESSIONS
--
-- Check that we can correctly handle complex expressions and aggregates.
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
?column?
---------------------------------------------------------------------
12000.0000000000000000
(1 row)

SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem;
?column?
---------------------------------------------------------------------
1200.0000000000000000
(1 row)

SELECT (sum(l_quantity) / (10 * avg(l_quantity))) + 11 FROM lineitem;
?column?
---------------------------------------------------------------------
1211.0000000000000000
(1 row)

SELECT avg(l_quantity) as average FROM lineitem;
average
---------------------------------------------------------------------
25.4462500000000000
(1 row)

SELECT 100 * avg(l_quantity) as average_times_hundred FROM lineitem;
average_times_hundred
---------------------------------------------------------------------
2544.6250000000000000
(1 row)

SELECT 100 * avg(l_quantity) / 10 as average_times_ten FROM lineitem;
average_times_ten
---------------------------------------------------------------------
254.4625000000000000
(1 row)

SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
l_quantity | count_quantity
---------------------------------------------------------------------
44.00 | 2150
38.00 | 2160
45.00 | 2180
13.00 | 2190
47.00 | 2200
29.00 | 2220
36.00 | 2230
49.00 | 2230
3.00 | 2270
35.00 | 2280
18.00 | 2290
31.00 | 2290
43.00 | 2290
14.00 | 2300
16.00 | 2300
17.00 | 2300
26.00 | 2300
7.00 | 2320
10.00 | 2340
34.00 | 2340
15.00 | 2350
25.00 | 2360
33.00 | 2360
42.00 | 2360
2.00 | 2370
12.00 | 2410
37.00 | 2410
6.00 | 2420
22.00 | 2420
1.00 | 2430
19.00 | 2430
4.00 | 2440
20.00 | 2460
48.00 | 2460
41.00 | 2470
24.00 | 2490
27.00 | 2490
8.00 | 2500
11.00 | 2500
5.00 | 2540
21.00 | 2550
32.00 | 2550
9.00 | 2580
39.00 | 2600
46.00 | 2600
50.00 | 2600
23.00 | 2610
30.00 | 2640
40.00 | 2690
28.00 | 2730
(50 rows)

-- Check that we can handle complex select clause expressions.
SELECT count(*) FROM lineitem
WHERE octet_length(l_comment || l_comment) > 40;
count
---------------------------------------------------------------------
8148
(1 row)

SELECT count(*) FROM lineitem
WHERE octet_length(concat(l_comment, l_comment)) > 40;
count
---------------------------------------------------------------------
8148
(1 row)

SELECT count(*) FROM lineitem
WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40;
count
---------------------------------------------------------------------
4611
(1 row)

SELECT count(*) FROM lineitem
WHERE octet_length(l_comment) + 10 > 40;
count
---------------------------------------------------------------------
4611
(1 row)

SELECT count(*) FROM lineitem
WHERE (l_receiptdate::timestamp - l_shipdate::timestamp) > interval '5 days';
count
---------------------------------------------------------------------
10008
(1 row)

-- can push down queries where no columns present on the WHERE clause
SELECT count(*) FROM lineitem WHERE random() = -0.1;
count
---------------------------------------------------------------------
0
(1 row)

-- boolean tests can be pushed down
SELECT count(*) FROM lineitem
WHERE (l_partkey > 10000) is true;
count
---------------------------------------------------------------------
11423
(1 row)

-- scalar array operator expressions can be pushed down
SELECT count(*) FROM lineitem
WHERE l_partkey = ANY(ARRAY[19353, 19354, 19355]);
count
---------------------------------------------------------------------
1
(1 row)

-- some more scalar array operator expressions
SELECT count(*) FROM lineitem
WHERE l_partkey = ALL(ARRAY[19353]);
count
---------------------------------------------------------------------
1
(1 row)

-- operator expressions involving arrays
SELECT count(*) FROM lineitem
WHERE ARRAY[19353, 19354, 19355] @> ARRAY[l_partkey];
count
---------------------------------------------------------------------
1
(1 row)

-- coerced via io expressions can be pushed down
SELECT count(*) FROM lineitem
WHERE (l_quantity/100)::int::bool::text::bool;
count
---------------------------------------------------------------------
260
(1 row)

-- case expressions can be pushed down
SELECT count(*) FROM lineitem
WHERE (CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END);
count
---------------------------------------------------------------------
7948
(1 row)

-- coalesce expressions can be pushed down
SELECT count(*) FROM lineitem
WHERE COALESCE((l_partkey/50000)::bool, false);
count
---------------------------------------------------------------------
9122
(1 row)

-- nullif expressions can be pushed down
SELECT count(*) FROM lineitem
WHERE NULLIF((l_partkey/50000)::bool, false);
count
---------------------------------------------------------------------
9122
(1 row)

-- null test expressions can be pushed down
SELECT count(*) FROM orders
WHERE o_comment IS NOT null;
count
---------------------------------------------------------------------
2985
(1 row)

-- functions can be pushed down
SELECT count(*) FROM lineitem
WHERE isfinite(l_shipdate);
count
---------------------------------------------------------------------
12000
(1 row)

-- constant expressions can be pushed down
SELECT count(*) FROM lineitem
WHERE 0 != 0;
count
---------------------------------------------------------------------
0
(1 row)

-- distinct expressions can be pushed down
SELECT count(*) FROM lineitem
WHERE l_partkey IS DISTINCT FROM 50040;
count
---------------------------------------------------------------------
11999
(1 row)

-- row compare expression can be pushed down
SELECT count(*) FROM lineitem
WHERE row(l_partkey, 2, 3) > row(2000, 2, 3);
count
---------------------------------------------------------------------
11882
(1 row)

-- combination of different expressions can be pushed down
SELECT count(*) FROM lineitem
WHERE
(l_quantity/100)::int::bool::text::bool AND
CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END AND
COALESCE((l_partkey/50000)::bool, false) AND
NULLIF((l_partkey/50000)::bool, false) AND
isfinite(l_shipdate) AND
l_partkey IS DISTINCT FROM 50040 AND
row(l_partkey, 2, 3) > row(2000, 2, 3);
count
---------------------------------------------------------------------
137
(1 row)

-- constant expression in the WHERE clause with a column in the target list
SELECT l_linenumber FROM lineitem
WHERE
1!=0
ORDER BY
l_linenumber
LIMIT 1;
l_linenumber
---------------------------------------------------------------------
1
(1 row)

-- constant expression in the WHERE clause with expressions and a column the target list
SELECT count(*) * l_discount as total_discount, count(*), sum(l_tax), l_discount FROM lineitem
WHERE
1!=0
GROUP BY
l_discount
ORDER BY
total_discount DESC, sum(l_tax) DESC;
total_discount | count | sum | l_discount
---------------------------------------------------------------------
104.80 | 1048 | 41.08 | 0.10
98.55 | 1095 | 44.15 | 0.09
90.64 | 1133 | 45.94 | 0.08
71.05 | 1015 | 41.19 | 0.07
69.42 | 1157 | 45.75 | 0.06
53.60 | 1072 | 42.82 | 0.05
43.64 | 1091 | 44.40 | 0.04
32.55 | 1085 | 43.30 | 0.03
22.22 | 1111 | 45.07 | 0.02
11.22 | 1122 | 44.54 | 0.01
0.00 | 1071 | 44.00 | 0.00
(11 rows)

-- distinct expressions in the WHERE clause with a column in the target list
SELECT l_linenumber FROM lineitem
WHERE
l_linenumber IS DISTINCT FROM 1 AND
l_orderkey IS DISTINCT FROM 8997
ORDER BY
l_linenumber
LIMIT 1;
l_linenumber
---------------------------------------------------------------------
2
(1 row)

-- distinct expressions in the WHERE clause with expressions and a column the target list
SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem
WHERE
l_linenumber IS DISTINCT FROM 1 AND
l_orderkey IS DISTINCT FROM 8997
GROUP BY
l_receiptdate
ORDER BY
l_receiptdate
LIMIT 1;
max | min | l_receiptdate
---------------------------------------------------------------------
3 | 0.07 | 01-09-1992
(1 row)

-- Check that we can handle implicit and explicit join clause definitions.
SELECT count(*) FROM lineitem, orders
WHERE l_orderkey = o_orderkey AND l_quantity < 5;
count
---------------------------------------------------------------------
951
(1 row)

SELECT count(*) FROM lineitem
JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5;
count
---------------------------------------------------------------------
951
(1 row)

SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey
WHERE l_quantity < 5;
count
---------------------------------------------------------------------
951
(1 row)

-- Check that we make sure local joins are between columns only.
SELECT count(*) FROM lineitem, orders WHERE l_orderkey + 1 = o_orderkey;
ERROR: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
-- Check that we can issue limit/offset queries
-- the subquery is recursively planned since it contains OFFSET, which is not pushdownable
SELECT * FROM (SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custkey OFFSET 20) sq ORDER BY 1 LIMIT 5;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries with offset are not supported yet
-- the subquery is recursively planned since it contains OFFSET, which is not pushdownable
SELECT * FROM (SELECT o_orderkey FROM orders ORDER BY o_orderkey OFFSET 20) sq ORDER BY 1 LIMIT 5;
ERROR: cannot perform distributed planning on this query
DETAIL: Subqueries with offset are not supported yet
-- Simple LIMIT/OFFSET with ORDER BY
SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20;
o_orderkey
---------------------------------------------------------------------
69
70
71
96
97
98
99
100
101
102
(10 rows)

-- LIMIT/OFFSET with a subquery
SELECT
customer_keys.o_custkey,
SUM(order_count) AS total_order_count
FROM
(SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count
FROM orders GROUP BY o_custkey, o_orderstatus ) customer_keys
GROUP BY
customer_keys.o_custkey
ORDER BY
customer_keys.o_custkey DESC
LIMIT 10 OFFSET 20;
o_custkey | total_order_count
---------------------------------------------------------------------
1466 | 1
1465 | 2
1463 | 4
1462 | 10
1460 | 1
1459 | 6
1457 | 1
1456 | 3
1454 | 2
1453 | 5
(10 rows)

SELECT
customer_keys.o_custkey,
SUM(order_count) AS total_order_count
FROM
(SELECT o_custkey, o_orderstatus, COUNT(*) over (partition by o_orderstatus) AS order_count
FROM orders GROUP BY o_custkey, o_orderstatus ) customer_keys
GROUP BY
customer_keys.o_custkey
ORDER BY
customer_keys.o_custkey DESC
LIMIT 10 OFFSET 20;
o_custkey | total_order_count
---------------------------------------------------------------------
1466 | 759
1465 | 759
1463 | 1499
1462 | 1499
1460 | 759
1459 | 1499
1457 | 740
1456 | 830
1454 | 1499
1453 | 1499
(10 rows)

SELECT
customer_keys.o_custkey,
SUM(order_count1 + order_count) AS total_order_count
FROM
(SELECT o_custkey, o_orderstatus, count(*) order_count1, COUNT(*) over (partition by o_orderstatus) AS order_count
FROM orders GROUP BY o_custkey, o_orderstatus ) customer_keys
GROUP BY
customer_keys.o_custkey
ORDER BY
customer_keys.o_custkey DESC
LIMIT 10 OFFSET 20;
o_custkey | total_order_count
---------------------------------------------------------------------
1466 | 760
1465 | 761
1463 | 1503
1462 | 1509
1460 | 760
1459 | 1505
1457 | 741
1456 | 833
1454 | 1501
1453 | 1504
(10 rows)

SET client_min_messages TO DEBUG1;
-- Ensure that we push down LIMIT and OFFSET properly
-- No Group-By -> Push Down
CREATE TEMP TABLE temp_limit_test_1 AS
SELECT o_custkey FROM orders LIMIT 10 OFFSET 15;
DEBUG: push down of limit count: 25
-- GROUP BY without ORDER BY -> No push-down
CREATE TEMP TABLE temp_limit_test_2 AS
SELECT o_custkey FROM orders GROUP BY o_custkey LIMIT 10 OFFSET 15;
-- GROUP BY and ORDER BY non-aggregate -> push-down
CREATE TEMP TABLE temp_limit_test_3 AS
SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custkey LIMIT 10 OFFSET 15;
DEBUG: push down of limit count: 25
-- GROUP BY and ORDER BY aggregate -> No push-down
CREATE TEMP TABLE temp_limit_test_4 AS
SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt DESC LIMIT 10 OFFSET 15;
-- OFFSET without LIMIT
SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980;
o_custkey
---------------------------------------------------------------------
1498
1498
1499
1499
1499
(5 rows)

-- LIMIT/OFFSET with Joins
SELECT
li.l_partkey,
o.o_custkey,
li.l_quantity
FROM
lineitem li JOIN orders o ON li.l_orderkey = o.o_orderkey
WHERE
li.l_quantity > 25
ORDER BY 1, 2, 3
LIMIT 10 OFFSET 20;
DEBUG: push down of limit count: 30
l_partkey | o_custkey | l_quantity
---------------------------------------------------------------------
655 | 58 | 50.00
669 | 319 | 34.00
699 | 1255 | 50.00
716 | 61 | 45.00
723 | 14 | 36.00
802 | 754 | 50.00
831 | 589 | 32.00
835 | 67 | 33.00
864 | 439 | 32.00
875 | 13 | 43.00
(10 rows)

RESET client_min_messages;
-- FILTERs
SELECT
l_orderkey,
sum(l_extendedprice),
sum(l_extendedprice) FILTER (WHERE l_shipmode = 'AIR'),
count(*),
count(*) FILTER (WHERE l_shipmode = 'AIR'),
max(l_extendedprice),
max(l_extendedprice) FILTER (WHERE l_quantity < 30)
FROM lineitem
GROUP BY l_orderkey
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_orderkey | sum | sum | count | count | max | max
---------------------------------------------------------------------
12804 | 440012.71 | 45788.16 | 7 | 1 | 94398.00 | 45788.16
9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14
2567 | 412076.77 | 59722.26 | 7 | 1 | 94894.00 | 9784.02
11142 | 410502.38 | 44965.95 | 7 | 1 | 83989.44 | 44965.95
12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30
2306 | 405629.96 | 28032.60 | 7 | 1 | 92838.00 | 44384.50
5606 | 403595.91 | 36531.51 | 7 | 2 | 94890.18 | 30582.75
11296 | 399079.89 | | 6 | 0 | 102449.00 | 33122.93
11046 | 391163.26 | 31436.34 | 7 | 2 | 94506.24 | 47519.76
4421 | 387313.12 | | 7 | 0 | 67301.52 | 23783.40
(10 rows)

SELECT
l_orderkey,
sum(l_extendedprice),
sum(l_extendedprice) FILTER (WHERE l_shipmode = 'AIR'),
count(*),
count(*) FILTER (WHERE l_shipmode = 'AIR'),
max(l_extendedprice),
max(l_extendedprice) FILTER (WHERE l_quantity < 30)
FROM lineitem
GROUP BY l_orderkey
HAVING count(*) FILTER (WHERE l_shipmode = 'AIR') > 1
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
l_orderkey | sum | sum | count | count | max | max
---------------------------------------------------------------------
9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14
12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30
5606 | 403595.91 | 36531.51 | 7 | 2 | 94890.18 | 30582.75
11046 | 391163.26 | 31436.34 | 7 | 2 | 94506.24 | 47519.76
14499 | 384140.30 | 67867.08 | 7 | 2 | 84335.36 | 46169.75
11623 | 380598.48 | 133709.82 | 7 | 2 | 93701.54 | 21487.65
10787 | 375688.09 | 99424.78 | 7 | 2 | 76732.67 | 50946.91
12902 | 358191.24 | 76891.00 | 7 | 2 | 82008.08 | 35602.08
3747 | 353701.23 | 68592.23 | 7 | 2 | 67181.10 | 46252.77
5158 | 349889.05 | 159753.19 | 7 | 3 | 78714.67 | 29729.20
(10 rows)

@@ -174,7 +174,7 @@ INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_co
Node: host=localhost port=xxxxx dbname=regression
-> Insert on public.composite_type_partitioned_table_530003 (actual rows=0 loops=1)
-> Result (actual rows=1 loops=1)
Output: xxxxxx
Output: 123, '(123,456)'::test_composite_type
(9 rows)

SELECT run_command_on_coordinator_and_workers($cf$

@@ -218,7 +218,7 @@ INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_co
Node: host=localhost port=xxxxx dbname=regression
-> Insert on public.composite_type_partitioned_table_530000 (actual rows=0 loops=1)
-> Result (actual rows=1 loops=1)
Output: xxxxxx
Output: 123, '(456,678)'::test_composite_type
(9 rows)

-- create and distribute a table on enum type column

@@ -345,14 +345,14 @@ EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort (actual rows=50 loops=1)
Output: xxxxxx
Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
Sort Method: quicksort Memory: 27kB
-> HashAggregate (actual rows=50 loops=1)
Output: xxxxxx
Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
Output: xxxxxx
Output: remote_scan.l_quantity, remote_scan.count_quantity
Task Count: 2
Tuple data received from nodes: 780 bytes
Tasks Shown: One of 2

@@ -361,48 +361,48 @@ Sort (actual rows=50 loops=1)
Tuple data received from node: 390 bytes
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate (actual rows=50 loops=1)
Output: xxxxxx
Output: l_quantity, count(*)
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem (actual rows=6000 loops=1)
Output: xxxxxx
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test query text output, with ANALYZE OFF
EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
Output: xxxxxx
Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-> HashAggregate
Output: xxxxxx
Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive)
Output: xxxxxx
Output: remote_scan.l_quantity, remote_scan.count_quantity
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT l_quantity, count(*) AS count_quantity FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: xxxxxx
Output: l_quantity, count(*)
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem
Output: xxxxxx
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test verbose
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
Aggregate
Output: xxxxxx
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
-> Custom Scan (Citus Adaptive)
Output: xxxxxx
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM lineitem_290000 lineitem WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: xxxxxx
Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
-> Seq Scan on public.lineitem_290000 lineitem
Output: xxxxxx
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test join
EXPLAIN (COSTS FALSE)
SELECT * FROM lineitem

@@ -525,40 +525,40 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
HAVING sum(l_quantity) > 100;
Aggregate
Output: xxxxxx
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
-> Custom Scan (Citus Adaptive)
Output: xxxxxx
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) AS worker_column_4 FROM lineitem_290000 lineitem WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: xxxxxx
Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity)
-> Seq Scan on public.lineitem_290000 lineitem
Output: xxxxxx
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Test having without aggregate
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT l_quantity FROM lineitem
GROUP BY l_quantity
HAVING l_quantity > (100 * random());
HashAggregate
Output: xxxxxx
Output: remote_scan.l_quantity
Group Key: remote_scan.l_quantity
Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
-> Custom Scan (Citus Adaptive)
Output: xxxxxx
Output: remote_scan.l_quantity, remote_scan.worker_column_2
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT l_quantity, l_quantity AS worker_column_2 FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: xxxxxx
Output: l_quantity, l_quantity
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem
Output: xxxxxx
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-- Subquery pushdown tests with explain
EXPLAIN (COSTS OFF)
SELECT

@@ -1385,38 +1385,36 @@ Custom Scan (Citus INSERT ... SELECT)
-> Function Scan on generate_series s
-> Function Scan on generate_series s_1
-- explain with recursive planning
-- prevent PG 11 - PG 12 outputs to diverge
SET citus.enable_cte_inlining TO false;
EXPLAIN (COSTS OFF, VERBOSE true)
WITH keys AS (
WITH keys AS MATERIALIZED (
SELECT DISTINCT l_orderkey FROM lineitem_hash_part
),
series AS (
series AS MATERIALIZED (
SELECT s FROM generate_series(1,10) s
)
SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey)
ORDER BY s;
Custom Scan (Citus Adaptive)
Output: xxxxxx
Output: remote_scan.l_orderkey
-> Distributed Subplan XXX_1
-> HashAggregate
Output: xxxxxx
Output: remote_scan.l_orderkey
Group Key: remote_scan.l_orderkey
-> Custom Scan (Citus Adaptive)
Output: xxxxxx
Output: remote_scan.l_orderkey
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT DISTINCT l_orderkey FROM lineitem_hash_part_360041 lineitem_hash_part WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: xxxxxx
Output: l_orderkey
Group Key: lineitem_hash_part.l_orderkey
-> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part
Output: xxxxxx
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-> Distributed Subplan XXX_2
-> Function Scan on pg_catalog.generate_series s
Output: xxxxxx
Output: s
Function Call: generate_series(1, 10)
Task Count: 1
Tasks Shown: All

@@ -1424,21 +1422,20 @@ Custom Scan (Citus Adaptive)
Query: SELECT keys.l_orderkey FROM ((SELECT intermediate_result.s FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(s integer)) series JOIN (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) keys ON ((series.s OPERATOR(pg_catalog.=) keys.l_orderkey))) ORDER BY series.s
Node: host=localhost port=xxxxx dbname=regression
-> Merge Join
Output: xxxxxx
Output: intermediate_result_1.l_orderkey, intermediate_result.s
Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey)
-> Sort
Output: xxxxxx
Output: intermediate_result.s
Sort Key: intermediate_result.s
-> Function Scan on pg_catalog.read_intermediate_result intermediate_result
Output: xxxxxx
Output: intermediate_result.s
Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format)
-> Sort
Output: xxxxxx
Output: intermediate_result_1.l_orderkey
Sort Key: intermediate_result_1.l_orderkey
-> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1
Output: xxxxxx
Output: intermediate_result_1.l_orderkey
Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
SET citus.enable_cte_inlining TO true;
SELECT true AS valid FROM explain_json($$
WITH result AS (
SELECT l_quantity, count(*) count_quantity FROM lineitem

@@ -1757,7 +1754,7 @@ SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_t
4
(4 rows)

SELECT explain_analyze_output ~ 'Output: xxxxxx
SELECT explain_analyze_output ~ 'Output: a, b' FROM worker_last_saved_explain_analyze();
?column?
---------------------------------------------------------------------
t

@@ -1,905 +0,0 @@
--
-- MULTI_EXTENSION
--
-- Tests around extension creation / upgrades
--
-- It'd be nice to script generation of this file, but alas, that's
-- not done yet.
-- differentiate the output file for pg11 and versions above, with regards to objects
-- created per citus version depending on the postgres version. Upgrade tests verify the
-- objects are added in citus_finish_pg_upgrade()
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
version_above_eleven
---------------------------------------------------------------------
f
(1 row)

SET citus.next_shard_id TO 580000;
SELECT $definition$
CREATE OR REPLACE FUNCTION test.maintenance_worker()
RETURNS pg_stat_activity
LANGUAGE plpgsql
AS $$
DECLARE
activity record;
BEGIN
DO 'BEGIN END'; -- Force maintenance daemon to start
-- we don't want to wait forever; loop will exit after 20 seconds
FOR i IN 1 .. 200 LOOP
PERFORM pg_stat_clear_snapshot();
SELECT * INTO activity FROM pg_stat_activity
WHERE application_name = 'Citus Maintenance Daemon' AND datname = current_database();
IF activity.pid IS NOT NULL THEN
RETURN activity;
ELSE
PERFORM pg_sleep(0.1);
END IF ;
END LOOP;
-- fail if we reach the end of this loop
raise 'Waited too long for maintenance daemon to start';
END;
$$;
$definition$ create_function_test_maintenance_worker
\gset
CREATE TABLE prev_objects(description text);
CREATE TABLE extension_diff(previous_object text COLLATE "C",
current_object text COLLATE "C");
CREATE FUNCTION print_extension_changes()
RETURNS TABLE(previous_object text, current_object text)
AS $func$
BEGIN
TRUNCATE TABLE extension_diff;

CREATE TABLE current_objects AS
SELECT pg_catalog.pg_describe_object(classid, objid, 0) AS description
FROM pg_catalog.pg_depend, pg_catalog.pg_extension e
WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass
AND refobjid = e.oid
AND deptype = 'e'
AND e.extname='citus';

INSERT INTO extension_diff
SELECT p.description previous_object, c.description current_object
FROM current_objects c FULL JOIN prev_objects p
ON p.description = c.description
WHERE p.description is null OR c.description is null;

DROP TABLE prev_objects;
ALTER TABLE current_objects RENAME TO prev_objects;

RETURN QUERY SELECT * FROM extension_diff ORDER BY 1, 2;
END
$func$ LANGUAGE plpgsql;
CREATE SCHEMA test;
:create_function_test_maintenance_worker
-- check maintenance daemon is started
SELECT datname, current_database(),
usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
FROM test.maintenance_worker();
datname | current_database | usename | extowner
---------------------------------------------------------------------
regression | regression | postgres | postgres
(1 row)

-- ensure no unexpected objects were created outside pg_catalog
SELECT pgio.type, pgio.identity
FROM pg_depend AS pgd,
pg_extension AS pge,
LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio
WHERE pgd.refclassid = 'pg_extension'::regclass AND
pgd.refobjid = pge.oid AND
pge.extname = 'citus' AND
pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar')
ORDER BY 1, 2;
type | identity
---------------------------------------------------------------------
view | public.citus_tables
(1 row)

-- DROP EXTENSION pre-created by the regression suite
DROP EXTENSION citus;
\c
-- these tests switch between citus versions and call ddl's that require pg_dist_object to be created
SET citus.enable_object_propagation TO 'false';
SET citus.enable_version_checks TO 'false';
CREATE EXTENSION citus VERSION '8.0-1';
ALTER EXTENSION citus UPDATE TO '8.0-2';
ALTER EXTENSION citus UPDATE TO '8.0-3';
ALTER EXTENSION citus UPDATE TO '8.0-4';
ALTER EXTENSION citus UPDATE TO '8.0-5';
ALTER EXTENSION citus UPDATE TO '8.0-6';
ALTER EXTENSION citus UPDATE TO '8.0-7';
ALTER EXTENSION citus UPDATE TO '8.0-8';
ALTER EXTENSION citus UPDATE TO '8.0-9';
ALTER EXTENSION citus UPDATE TO '8.0-10';
ALTER EXTENSION citus UPDATE TO '8.0-11';
ALTER EXTENSION citus UPDATE TO '8.0-12';
ALTER EXTENSION citus UPDATE TO '8.0-13';
ALTER EXTENSION citus UPDATE TO '8.1-1';
ALTER EXTENSION citus UPDATE TO '8.2-1';
ALTER EXTENSION citus UPDATE TO '8.2-2';
ALTER EXTENSION citus UPDATE TO '8.2-3';
ALTER EXTENSION citus UPDATE TO '8.2-4';
ALTER EXTENSION citus UPDATE TO '8.3-1';
ALTER EXTENSION citus UPDATE TO '9.0-1';
ALTER EXTENSION citus UPDATE TO '9.0-2';
ALTER EXTENSION citus UPDATE TO '9.1-1';
ALTER EXTENSION citus UPDATE TO '9.2-1';
ALTER EXTENSION citus UPDATE TO '9.2-2';
-- Snapshot of state at 9.2-2
SELECT * FROM print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
| event trigger citus_cascade_to_partition
| function alter_role_if_exists(text,text)
| function any_value(anyelement)
| function any_value_agg(anyelement,anyelement)
| function array_cat_agg(anyarray)
| function assign_distributed_transaction_id(integer,bigint,timestamp with time zone)
| function authinfo_valid(text)
| function broadcast_intermediate_result(text,text)
| function check_distributed_deadlocks()
| function citus_add_rebalance_strategy(name,regproc,regproc,regproc,real,real)
| function citus_blocking_pids(integer)
| function citus_create_restore_point(text)
| function citus_dist_stat_activity()
| function citus_drop_trigger()
| function citus_executor_name(integer)
| function citus_extradata_container(internal)
| function citus_finish_pg_upgrade()
| function citus_internal.find_groupid_for_node(text,integer)
| function citus_internal.pg_dist_node_trigger_func()
| function citus_internal.pg_dist_rebalance_strategy_enterprise_check()
| function citus_internal.pg_dist_rebalance_strategy_trigger_func()
| function citus_internal.pg_dist_shard_placement_trigger_func()
| function citus_internal.refresh_isolation_tester_prepared_statement()
| function citus_internal.replace_isolation_tester_func()
| function citus_internal.restore_isolation_tester_func()
| function citus_isolation_test_session_is_blocked(integer,integer[])
| function citus_json_concatenate(json,json)
| function citus_json_concatenate_final(json)
| function citus_jsonb_concatenate(jsonb,jsonb)
| function citus_jsonb_concatenate_final(jsonb)
| function citus_node_capacity_1(integer)
| function citus_prepare_pg_upgrade()
| function citus_query_stats()
| function citus_relation_size(regclass)
| function citus_server_id()
| function citus_set_default_rebalance_strategy(text)
| function citus_shard_allowed_on_node_true(bigint,integer)
| function citus_shard_cost_1(bigint)
| function citus_shard_cost_by_disk_size(bigint)
| function citus_stat_statements()
| function citus_stat_statements_reset()
| function citus_table_is_visible(oid)
| function citus_table_size(regclass)
| function citus_text_send_as_jsonb(text)
| function citus_total_relation_size(regclass)
| function citus_truncate_trigger()
| function citus_validate_rebalance_strategy_functions(regproc,regproc,regproc)
| function citus_version()
| function citus_worker_stat_activity()
| function column_name_to_column(regclass,text)
| function column_to_column_name(regclass,text)
| function coord_combine_agg(oid,cstring,anyelement)
| function coord_combine_agg_ffunc(internal,oid,cstring,anyelement)
| function coord_combine_agg_sfunc(internal,oid,cstring,anyelement)
| function create_distributed_function(regprocedure,text,text)
| function create_distributed_table(regclass,text,citus.distribution_type,text)
| function create_intermediate_result(text,text)
| function create_reference_table(regclass)
| function distributed_tables_colocated(regclass,regclass)
| function dump_global_wait_edges()
| function dump_local_wait_edges()
| function fetch_intermediate_results(text[],text,integer)
| function get_all_active_transactions()
| function get_colocated_shard_array(bigint)
| function get_colocated_table_array(regclass)
| function get_current_transaction_id()
| function get_global_active_transactions()
| function get_rebalance_progress()
| function get_rebalance_table_shards_plan(regclass,real,integer,bigint[],boolean,name)
| function get_shard_id_for_distribution_column(regclass,"any")
| function isolate_tenant_to_new_shard(regclass,"any",text)
| function json_cat_agg(json)
| function jsonb_cat_agg(jsonb)
| function lock_relation_if_exists(text,text)
| function lock_shard_metadata(integer,bigint[])
| function lock_shard_resources(integer,bigint[])
| function mark_tables_colocated(regclass,regclass[])
| function master_activate_node(text,integer)
| function master_add_inactive_node(text,integer,integer,noderole,name)
| function master_add_node(text,integer,integer,noderole,name)
| function master_add_secondary_node(text,integer,text,integer,name)
| function master_append_table_to_shard(bigint,text,text,integer)
| function master_apply_delete_command(text)
| function master_conninfo_cache_invalidate()
| function master_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
| function master_create_distributed_table(regclass,text,citus.distribution_type)
| function master_create_empty_shard(text)
| function master_create_worker_shards(text,integer,integer)
| function master_disable_node(text,integer)
| function master_dist_local_group_cache_invalidate()
| function master_dist_node_cache_invalidate()
| function master_dist_object_cache_invalidate()
| function master_dist_partition_cache_invalidate()
| function master_dist_placement_cache_invalidate()
| function master_dist_shard_cache_invalidate()
| function master_drain_node(text,integer,citus.shard_transfer_mode,name)
| function master_drop_all_shards(regclass,text,text)
| function master_drop_sequences(text[])
| function master_get_active_worker_nodes()
| function master_get_new_placementid()
| function master_get_new_shardid()
| function master_get_table_ddl_events(text)
| function master_get_table_metadata(text)
| function master_modify_multiple_shards(text)
| function master_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
| function master_remove_distributed_table_metadata_from_workers(regclass,text,text)
| function master_remove_node(text,integer)
| function master_remove_partition_metadata(regclass,text,text)
| function master_run_on_worker(text[],integer[],text[],boolean)
| function master_set_node_property(text,integer,text,boolean)
| function master_unmark_object_distributed(oid,oid,integer)
| function master_update_node(integer,text,integer,boolean,integer)
| function master_update_shard_statistics(bigint)
| function master_update_table_statistics(regclass)
| function poolinfo_valid(text)
| function read_intermediate_result(text,citus_copy_format)
| function read_intermediate_results(text[],citus_copy_format)
| function rebalance_table_shards(regclass,real,integer,bigint[],citus.shard_transfer_mode,boolean,name)
| function recover_prepared_transactions()
| function relation_is_a_known_shard(regclass)
| function replicate_table_shards(regclass,integer,integer,bigint[],citus.shard_transfer_mode)
| function role_exists(name)
| function run_command_on_colocated_placements(regclass,regclass,text,boolean)
| function run_command_on_placements(regclass,text,boolean)
| function run_command_on_shards(regclass,text,boolean)
| function run_command_on_workers(text,boolean)
| function shard_name(regclass,bigint)
| function start_metadata_sync_to_node(text,integer)
| function stop_metadata_sync_to_node(text,integer)
| function task_tracker_assign_task(bigint,integer,text)
| function task_tracker_cleanup_job(bigint)
| function task_tracker_conninfo_cache_invalidate()
| function task_tracker_task_status(bigint,integer)
| function upgrade_to_reference_table(regclass)
| function worker_append_table_to_shard(text,text,text,integer)
| function worker_apply_inter_shard_ddl_command(bigint,text,bigint,text,text)
| function worker_apply_sequence_command(text)
| function worker_apply_sequence_command(text,regtype)
| function worker_apply_shard_ddl_command(bigint,text)
| function worker_apply_shard_ddl_command(bigint,text,text)
| function worker_cleanup_job_schema_cache()
| function worker_create_or_replace_object(text)
| function worker_create_schema(bigint,text)
| function worker_create_truncate_trigger(regclass)
| function worker_drop_distributed_table(text)
| function worker_execute_sql_task(bigint,integer,text,boolean)
| function worker_fetch_foreign_file(text,text,bigint,text[],integer[])
| function worker_fetch_partition_file(bigint,integer,integer,integer,text,integer)
| function worker_hash("any")
| function worker_hash_partition_table(bigint,integer,text,text,oid,anyarray)
| function worker_merge_files_and_run_query(bigint,integer,text,text)
| function worker_merge_files_into_table(bigint,integer,text[],text[])
| function worker_partial_agg(oid,anyelement)
| function worker_partial_agg_ffunc(internal)
| function worker_partial_agg_sfunc(internal,oid,anyelement)
| function worker_partition_query_result(text,text,integer,citus.distribution_type,text[],text[],boolean)
| function worker_range_partition_table(bigint,integer,text,text,oid,anyarray)
| function worker_repartition_cleanup(bigint)
| schema citus
| schema citus_internal
| sequence pg_dist_colocationid_seq
| sequence pg_dist_groupid_seq
| sequence pg_dist_node_nodeid_seq
| sequence pg_dist_placement_placementid_seq
| sequence pg_dist_shardid_seq
| table citus.pg_dist_object
| table pg_dist_authinfo
| table pg_dist_colocation
| table pg_dist_local_group
| table pg_dist_node
| table pg_dist_node_metadata
| table pg_dist_partition
| table pg_dist_placement
| table pg_dist_poolinfo
| table pg_dist_rebalance_strategy
| table pg_dist_shard
| table pg_dist_transaction
| type citus.distribution_type
| type citus.shard_transfer_mode
| type citus_copy_format
| type noderole
| view citus_dist_stat_activity
| view citus_lock_waits
| view citus_shard_indexes_on_worker
| view citus_shards_on_worker
| view citus_stat_statements
| view citus_worker_stat_activity
| view pg_dist_shard_placement
(188 rows)

-- Test downgrade to 9.2-2 from 9.2-4
|
||||
ALTER EXTENSION citus UPDATE TO '9.2-4';
|
||||
ALTER EXTENSION citus UPDATE TO '9.2-2';
|
||||
-- Should be empty result since upgrade+downgrade should be a no-op
|
||||
SELECT * FROM print_extension_changes();
|
||||
previous_object | current_object
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
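-- For reference, a minimal sketch of how a helper like print_extension_changes()
-- can be implemented; the real definition appears earlier in this test file and
-- may differ. It diffs the extension's current member objects, as recorded in
-- pg_depend, against a snapshot kept in prev_objects (assumed here to have a
-- single "description" text column, matching the DROP TABLE near the end).
CREATE OR REPLACE FUNCTION print_extension_changes_sketch()
RETURNS TABLE(previous_object text, current_object text)
AS $func$
BEGIN
    -- collect every object that currently belongs to the citus extension
    CREATE TEMP TABLE current_objects AS
        SELECT pg_catalog.pg_describe_object(classid, objid, 0) AS description
        FROM pg_catalog.pg_depend, pg_catalog.pg_extension e
        WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass
          AND refobjid = e.oid
          AND deptype = 'e'
          AND e.extname = 'citus';
    -- report objects present on only one side of the snapshot
    RETURN QUERY
        SELECT p.description, c.description
        FROM current_objects c FULL JOIN prev_objects p
            ON p.description = c.description
        WHERE p.description IS NULL OR c.description IS NULL
        ORDER BY 1, 2;
    -- roll the snapshot forward so the next call reports only new changes
    DELETE FROM prev_objects;
    INSERT INTO prev_objects SELECT description FROM current_objects;
    DROP TABLE current_objects;
END
$func$ LANGUAGE plpgsql;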
/*
 * As we mistakenly bumped the schema version to 9.3-1 in a bad release, we support
 * updating the citus schema from 9.3-1 to 9.2-4, but we do not support updates to 9.3-1.
 *
 * Hence the command below should fail.
 */
ALTER EXTENSION citus UPDATE TO '9.3-1';
ERROR: extension "citus" has no update path from version "9.2-2" to version "9.3-1"
ALTER EXTENSION citus UPDATE TO '9.2-4';
-- Snapshot of state at 9.2-4
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

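-- The set of valid update paths is derived from the citus--X--Y.sql scripts
-- shipped with the installed build, and the server can be asked for them
-- directly. An illustrative check (rows returned depend on the shipped
-- scripts; a NULL path means no route exists between the two versions):
SELECT source, target, path
FROM pg_extension_update_paths('citus')
WHERE source = '9.2-2' AND target = '9.3-1';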
-- Test downgrade to 9.2-4 from 9.3-2
ALTER EXTENSION citus UPDATE TO '9.3-2';
ALTER EXTENSION citus UPDATE TO '9.2-4';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
 | function citus_remote_connection_stats()
 | function replicate_reference_tables()
 | function truncate_local_data_after_distributing_table(regclass)
 | function update_distributed_table_colocation(regclass,text)
 | function worker_create_or_alter_role(text,text,text)
(0 rows)

-- Snapshot of state at 9.3-2
ALTER EXTENSION citus UPDATE TO '9.3-2';
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
 | function citus_remote_connection_stats()
 | function replicate_reference_tables()
 | function truncate_local_data_after_distributing_table(regclass)
 | function update_distributed_table_colocation(regclass,text)
 | function worker_create_or_alter_role(text,text,text)
(5 rows)

-- Test downgrade to 9.3-2 from 9.4-1
ALTER EXTENSION citus UPDATE TO '9.4-1';
ALTER EXTENSION citus UPDATE TO '9.3-2';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Snapshot of state at 9.4-1
ALTER EXTENSION citus UPDATE TO '9.4-1';
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
 | function worker_last_saved_explain_analyze()
 | function worker_save_query_explain_analyze(text,jsonb)
(2 rows)

-- Test downgrade to 9.4-1 from 9.5-1
ALTER EXTENSION citus UPDATE TO '9.5-1';
BEGIN;
SELECT master_add_node('localhost', :master_port, groupId=>0);
 master_add_node
---------------------------------------------------------------------
               1
(1 row)

CREATE TABLE citus_local_table (a int);
SELECT create_citus_local_table('citus_local_table');
NOTICE: create_citus_local_table is deprecated in favour of citus_add_local_table_to_metadata
 create_citus_local_table
---------------------------------------------------------------------

(1 row)

-- downgrade from 9.5-1 to 9.4-1 should fail as we have a citus local table
ALTER EXTENSION citus UPDATE TO '9.4-1';
ERROR: citus local tables are introduced in Citus 9.5
HINT: To downgrade Citus to an older version, you should first convert each citus local table to a postgres table by executing SELECT undistribute_table("%s")
CONTEXT: PL/pgSQL function inline_code_block line 11 at RAISE
ROLLBACK;
-- now we can downgrade as there is no citus local table
ALTER EXTENSION citus UPDATE TO '9.4-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

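-- The failed downgrade above is enforced by a guard inside the 9.5-1 to 9.4-1
-- downgrade script. A minimal sketch of such a guard, assuming citus local
-- tables can be recognized in pg_dist_partition as non-distributed entries
-- (partmethod = 'n') that are not reference tables (repmodel <> 't'); the
-- shipped script may use a different predicate and the full hint text:
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_dist_partition
               WHERE partmethod = 'n' AND repmodel <> 't') THEN
        RAISE 'citus local tables are introduced in Citus 9.5'
        USING HINT = 'Convert each citus local table back to a postgres '
                     'table with undistribute_table() before downgrading.';
    END IF;
END;
$$;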
-- Snapshot of state at 9.5-1
ALTER EXTENSION citus UPDATE TO '9.5-1';
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
 function master_drop_sequences(text[]) |
 function task_tracker_assign_task(bigint,integer,text) |
 function task_tracker_cleanup_job(bigint) |
 function task_tracker_conninfo_cache_invalidate() |
 function task_tracker_task_status(bigint,integer) |
 function worker_execute_sql_task(bigint,integer,text,boolean) |
 function worker_merge_files_and_run_query(bigint,integer,text,text) |
 | function create_citus_local_table(regclass)
 | function undistribute_table(regclass)
 | function worker_record_sequence_dependency(regclass,regclass,name)
(10 rows)

-- Test downgrade to 9.5-1 from 10.0-1
ALTER EXTENSION citus UPDATE TO '10.0-1';
ALTER EXTENSION citus UPDATE TO '9.5-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Snapshot of state at 10.0-1
ALTER EXTENSION citus UPDATE TO '10.0-1';
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
 function citus_total_relation_size(regclass) |
 function create_citus_local_table(regclass) |
 function mark_tables_colocated(regclass,regclass[]) |
 function master_conninfo_cache_invalidate() |
 function master_create_distributed_table(regclass,text,citus.distribution_type) |
 function master_create_worker_shards(text,integer,integer) |
 function master_dist_local_group_cache_invalidate() |
 function master_dist_node_cache_invalidate() |
 function master_dist_object_cache_invalidate() |
 function master_dist_partition_cache_invalidate() |
 function master_dist_placement_cache_invalidate() |
 function master_dist_shard_cache_invalidate() |
 function master_drop_all_shards(regclass,text,text) |
 function master_modify_multiple_shards(text) |
 function undistribute_table(regclass) |
 function upgrade_to_reference_table(regclass) |
 | function alter_distributed_table(regclass,text,integer,text,boolean)
 | function alter_old_partitions_set_access_method(regclass,timestamp with time zone,name)
 | function alter_table_set_access_method(regclass,text)
 | function citus_activate_node(text,integer)
 | function citus_add_inactive_node(text,integer,integer,noderole,name)
 | function citus_add_local_table_to_metadata(regclass,boolean)
 | function citus_add_node(text,integer,integer,noderole,name)
 | function citus_add_secondary_node(text,integer,text,integer,name)
 | function citus_conninfo_cache_invalidate()
 | function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode)
 | function citus_disable_node(text,integer)
 | function citus_dist_local_group_cache_invalidate()
 | function citus_dist_node_cache_invalidate()
 | function citus_dist_object_cache_invalidate()
 | function citus_dist_partition_cache_invalidate()
 | function citus_dist_placement_cache_invalidate()
 | function citus_dist_shard_cache_invalidate()
 | function citus_drain_node(text,integer,citus.shard_transfer_mode,name)
 | function citus_drop_all_shards(regclass,text,text)
 | function citus_internal.columnar_ensure_objects_exist()
 | function citus_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
 | function citus_remove_node(text,integer)
 | function citus_set_coordinator_host(text,integer,noderole,name)
 | function citus_set_node_property(text,integer,text,boolean)
 | function citus_shard_sizes()
 | function citus_total_relation_size(regclass,boolean)
 | function citus_unmark_object_distributed(oid,oid,integer)
 | function citus_update_node(integer,text,integer,boolean,integer)
 | function citus_update_shard_statistics(bigint)
 | function citus_update_table_statistics(regclass)
 | function fix_pre_citus10_partitioned_table_constraint_names()
 | function fix_pre_citus10_partitioned_table_constraint_names(regclass)
 | function notify_constraint_dropped()
 | function remove_local_tables_from_metadata()
 | function time_partition_range(regclass)
 | function undistribute_table(regclass,boolean)
 | function worker_change_sequence_dependency(regclass,regclass,regclass)
 | function worker_fix_pre_citus10_partitioned_table_constraint_names(regclass,bigint,text)
 | schema columnar
 | sequence columnar.storageid_seq
 | table columnar.chunk
 | table columnar.chunk_group
 | table columnar.options
 | table columnar.stripe
 | view citus_shards
 | view citus_tables
 | view time_partitions
(63 rows)

-- Test downgrade to 10.0-1 from 10.0-2
ALTER EXTENSION citus UPDATE TO '10.0-2';
ALTER EXTENSION citus UPDATE TO '10.0-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Snapshot of state at 10.0-2
ALTER EXTENSION citus UPDATE TO '10.0-2';
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Test downgrade to 10.0-2 from 10.0-3
ALTER EXTENSION citus UPDATE TO '10.0-3';
ALTER EXTENSION citus UPDATE TO '10.0-2';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Snapshot of state at 10.0-3
ALTER EXTENSION citus UPDATE TO '10.0-3';
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
 | function citus_get_active_worker_nodes()
(1 row)

-- Test downgrade to 10.0-3 from 10.1-1
ALTER EXTENSION citus UPDATE TO '10.1-1';
ALTER EXTENSION citus UPDATE TO '10.0-3';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

-- Snapshot of state at 10.1-1
ALTER EXTENSION citus UPDATE TO '10.1-1';
SELECT * FROM print_extension_changes();
 previous_object | current_object
---------------------------------------------------------------------
(0 rows)

DROP TABLE prev_objects, extension_diff;
-- show running version
SHOW citus.version;
 citus.version
---------------------------------------------------------------------
 10.1devel
(1 row)

-- ensure no unexpected objects were created outside pg_catalog
SELECT pgio.type, pgio.identity
FROM pg_depend AS pgd,
     pg_extension AS pge,
     LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio
WHERE pgd.refclassid = 'pg_extension'::regclass AND
      pgd.refobjid = pge.oid AND
      pge.extname = 'citus' AND
      pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar')
ORDER BY 1, 2;
 type | identity
---------------------------------------------------------------------
 view | public.citus_tables
(1 row)

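-- As an aside, pg_identify_object(classid, objid, objsubid) resolves any
-- catalog object into a (type, schema, name, identity) tuple, which is what
-- the query above relies on. A standalone illustration against the citus
-- extension itself:
SELECT pgio.type, pgio.identity
FROM pg_identify_object('pg_extension'::regclass,
                        (SELECT oid FROM pg_extension WHERE extname = 'citus'),
                        0) AS pgio;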
-- see that an incompatible version errors out
RESET citus.enable_version_checks;
DROP EXTENSION citus;
CREATE EXTENSION citus VERSION '8.0-1';
ERROR: specified version incompatible with loaded Citus library
DETAIL: Loaded library requires 10.1, but 8.0-1 was specified.
HINT: If a newer library is present, restart the database and try the command again.
-- Test that non-distributed queries work even with a version mismatch
SET citus.enable_version_checks TO 'false';
CREATE EXTENSION citus VERSION '8.1-1';
SET citus.enable_version_checks TO 'true';
-- Test CREATE TABLE
CREATE TABLE version_mismatch_table(column1 int);
-- Test COPY
\copy version_mismatch_table FROM STDIN;
-- Test INSERT
INSERT INTO version_mismatch_table(column1) VALUES(5);
-- Test SELECT
SELECT * FROM version_mismatch_table ORDER BY column1;
 column1
---------------------------------------------------------------------
       0
       1
       2
       3
       4
       5
(6 rows)

-- Test SELECT from pg_catalog
SELECT d.datname as "Name",
       pg_catalog.pg_get_userbyid(d.datdba) as "Owner",
       pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges"
FROM pg_catalog.pg_database d
ORDER BY 1;
    Name    |  Owner   | Access privileges
---------------------------------------------------------------------
 postgres   | postgres |
 regression | postgres |
 template0  | postgres | =c/postgres          +
            |          | postgres=CTc/postgres
 template1  | postgres | =c/postgres          +
            |          | postgres=CTc/postgres
(4 rows)

-- We should not be able to distribute a table under a version mismatch
SELECT create_distributed_table('version_mismatch_table', 'column1');
ERROR: loaded Citus library version differs from installed extension version
DETAIL: Loaded library requires 10.1, but the installed extension version is 8.1-1.
HINT: Run ALTER EXTENSION citus UPDATE and try again.
-- This function will cause the next ALTER EXTENSION to fail
CREATE OR REPLACE FUNCTION pg_catalog.relation_is_a_known_shard(regclass)
RETURNS void LANGUAGE plpgsql
AS $function$
BEGIN
END;
$function$;
ERROR: cannot change return type of existing function
HINT: Use DROP FUNCTION relation_is_a_known_shard(regclass) first.
SET citus.enable_version_checks TO 'false';
-- This will fail because of the previous function declaration
ALTER EXTENSION citus UPDATE TO '8.1-1';
NOTICE: version "8.1-1" of extension "citus" is already installed
-- We can DROP the problematic function and continue with ALTER EXTENSION even when version checks are on
SET citus.enable_version_checks TO 'true';
DROP FUNCTION pg_catalog.relation_is_a_known_shard(regclass);
ERROR: cannot drop function relation_is_a_known_shard(regclass) because extension citus requires it
HINT: You can drop extension citus instead.
SET citus.enable_version_checks TO 'false';
ALTER EXTENSION citus UPDATE TO '8.1-1';
NOTICE: version "8.1-1" of extension "citus" is already installed
-- Test updating to the latest version without specifying the version number
ALTER EXTENSION citus UPDATE;
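-- One way to confirm that an unqualified UPDATE landed on the control file's
-- default version is to compare it against the installed version; this is an
-- illustrative check, not part of the test flow above:
SELECT name, default_version, installed_version
FROM pg_available_extensions
WHERE name = 'citus';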
-- re-create in newest version
DROP EXTENSION citus;
\c
CREATE EXTENSION citus;
-- test cache invalidation in workers
\c - - - :worker_1_port
DROP EXTENSION citus;
SET citus.enable_version_checks TO 'false';
CREATE EXTENSION citus VERSION '8.0-1';
SET citus.enable_version_checks TO 'true';
-- during ALTER EXTENSION, we should invalidate the cache
ALTER EXTENSION citus UPDATE;
-- if the cache is invalidated successfully, this \d should work without any problem
\d
        List of relations
 Schema |     Name     | Type | Owner
---------------------------------------------------------------------
 public | citus_tables | view | postgres
(1 row)

\c - - - :master_port
-- test https://github.com/citusdata/citus/issues/3409
CREATE USER testuser2 SUPERUSER;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
SET ROLE testuser2;
DROP EXTENSION Citus;
-- Loop until we see there's no maintenance daemon running
DO $$begin
    for i in 0 .. 100 loop
        if i = 100 then raise 'Waited too long'; end if;
        PERFORM pg_stat_clear_snapshot();
        perform * from pg_stat_activity where application_name = 'Citus Maintenance Daemon';
        if not found then exit; end if;
        perform pg_sleep(0.1);
    end loop;
end$$;
SELECT datid, datname, usename FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
 datid | datname | usename
---------------------------------------------------------------------
(0 rows)

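-- The polling DO-blocks above and below could be factored into a single
-- hypothetical helper, sketched here for illustration (not something the
-- test suite actually defines):
CREATE OR REPLACE FUNCTION wait_for_maintenance_daemon(should_run boolean)
RETURNS void LANGUAGE plpgsql AS $$
BEGIN
    FOR i IN 0 .. 100 LOOP
        IF i = 100 THEN RAISE 'Waited too long'; END IF;
        PERFORM pg_stat_clear_snapshot();
        -- PERFORM sets the plpgsql FOUND variable when a row matches
        PERFORM * FROM pg_stat_activity
        WHERE application_name = 'Citus Maintenance Daemon';
        IF found = should_run THEN EXIT; END IF;
        PERFORM pg_sleep(0.1);
    END LOOP;
END;
$$;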
CREATE EXTENSION Citus;
-- Loop until there's a maintenance daemon running
DO $$begin
    for i in 0 .. 100 loop
        if i = 100 then raise 'Waited too long'; end if;
        PERFORM pg_stat_clear_snapshot();
        perform * from pg_stat_activity where application_name = 'Citus Maintenance Daemon';
        if found then exit; end if;
        perform pg_sleep(0.1);
    end loop;
end$$;
SELECT datid, datname, usename FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
 datid |  datname   |  usename
---------------------------------------------------------------------
 16384 | regression | testuser2
(1 row)

RESET ROLE;
-- check that the maintenance daemon gets (re-)started for the right user
DROP EXTENSION citus;
CREATE USER testuser SUPERUSER;
SET ROLE testuser;
CREATE EXTENSION citus;
SELECT datname, current_database(),
    usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
FROM test.maintenance_worker();
  datname   | current_database | usename  | extowner
---------------------------------------------------------------------
 regression | regression       | testuser | testuser
(1 row)

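-- test.maintenance_worker() is created from a psql variable earlier in the
-- test file and is not shown in this excerpt. A plausible sketch, assuming it
-- simply polls until the daemon's pg_stat_activity row appears and returns it:
CREATE OR REPLACE FUNCTION test.maintenance_worker()
RETURNS pg_stat_activity LANGUAGE plpgsql AS $$
DECLARE
    activity record;
BEGIN
    FOR i IN 1 .. 200 LOOP
        PERFORM pg_stat_clear_snapshot();
        SELECT * INTO activity FROM pg_stat_activity
        WHERE application_name = 'Citus Maintenance Daemon'
          AND datname = current_database();
        IF activity.pid IS NOT NULL THEN
            RETURN activity;
        END IF;
        PERFORM pg_sleep(0.1);
    END LOOP;
    RAISE 'waited too long for the maintenance daemon to start';
END;
$$;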
-- and recreate as the right owner
RESET ROLE;
DROP EXTENSION citus;
CREATE EXTENSION citus;
-- Check that the maintenance daemon can also be started in another database
CREATE DATABASE another;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
\c another
CREATE EXTENSION citus;
CREATE SCHEMA test;
:create_function_test_maintenance_worker
-- see that the daemon started
SELECT datname, current_database(),
    usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
FROM test.maintenance_worker();
 datname | current_database | usename  | extowner
---------------------------------------------------------------------
 another | another          | postgres | postgres
(1 row)

-- Test that a database with an active maintenance daemon can be dropped.
\c regression
CREATE SCHEMA test_daemon;
-- we create a similar function on the regression database;
-- it checks pg_stat_activity for the given database's maintenance
-- daemon and returns true when the daemon is not found, false otherwise
CREATE OR REPLACE FUNCTION test_daemon.maintenance_daemon_died(p_dbname text)
RETURNS boolean
LANGUAGE plpgsql
AS $$
DECLARE
    activity record;
BEGIN
    PERFORM pg_stat_clear_snapshot();
    SELECT * INTO activity FROM pg_stat_activity
    WHERE application_name = 'Citus Maintenance Daemon' AND datname = p_dbname;
    IF activity.pid IS NULL THEN
        RETURN true;
    ELSE
        RETURN false;
    END IF;
END;
$$;
-- drop the database and see that the daemon is dead
DROP DATABASE another;
SELECT
    *
FROM
    test_daemon.maintenance_daemon_died('another');
 maintenance_daemon_died
---------------------------------------------------------------------
 t
(1 row)

-- we don't need the schema and the function anymore
DROP SCHEMA test_daemon CASCADE;
NOTICE: drop cascades to function test_daemon.maintenance_daemon_died(text)
-- verify citus does not crash while creating a table when run against an older worker
-- create_distributed_table piggybacks multiple commands into a single one; if a worker
-- does not have the required UDF, it should fail instead of crashing.
-- create a test database, configure citus with single node
CREATE DATABASE another;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
\c - - - :worker_1_port
CREATE DATABASE another;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
DETAIL: Citus does not propagate CREATE DATABASE command to workers
HINT: You can manually create a database and its extensions on workers.
\c - - - :master_port
\c another
CREATE EXTENSION citus;
SET citus.enable_object_propagation TO off; -- prevent distributed transactions during add node
SELECT FROM master_add_node('localhost', :worker_1_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
--
(1 row)

\c - - - :worker_1_port
CREATE EXTENSION citus;
ALTER FUNCTION assign_distributed_transaction_id(initiator_node_identifier integer, transaction_number bigint, transaction_stamp timestamp with time zone)
RENAME TO dummy_assign_function;
\c - - - :master_port
SET citus.shard_replication_factor TO 1;
-- the create_distributed_table command should fail
CREATE TABLE t1(a int, b int);
SET client_min_messages TO ERROR;
DO $$
BEGIN
    BEGIN
        SELECT create_distributed_table('t1', 'a');
    EXCEPTION WHEN OTHERS THEN
        RAISE 'create distributed table failed';
    END;
END;
$$;
ERROR: create distributed table failed
CONTEXT: PL/pgSQL function inline_code_block line 6 at RAISE
\c regression
\c - - - :master_port
DROP DATABASE another;
\c - - - :worker_1_port
DROP DATABASE another;
\c - - - :master_port
-- only the regression database should have a maintenance daemon
SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
 count
---------------------------------------------------------------------
     1
(1 row)

-- recreate the extension immediately after the maintenance daemon errors
SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
 pg_cancel_backend
---------------------------------------------------------------------
 t
(1 row)

DROP EXTENSION citus;
CREATE EXTENSION citus;
-- wait for the maintenance daemon to restart
SELECT datname, current_database(),
    usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
FROM test.maintenance_worker();
  datname   | current_database | usename  | extowner
---------------------------------------------------------------------
 regression | regression       | postgres | postgres
(1 row)

-- confirm that there is only one maintenance daemon
SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
 count
---------------------------------------------------------------------
     1
(1 row)

-- kill the maintenance daemon
SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
 pg_cancel_backend
---------------------------------------------------------------------
 t
(1 row)

-- reconnect
\c - - - :master_port
-- run something that goes through the planner hook and therefore kicks off the maintenance daemon
SELECT 1;
 ?column?
---------------------------------------------------------------------
        1
(1 row)

-- wait for the maintenance daemon to restart
SELECT datname, current_database(),
    usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
FROM test.maintenance_worker();
  datname   | current_database | usename  | extowner
---------------------------------------------------------------------
 regression | regression       | postgres | postgres
(1 row)

-- confirm that there is only one maintenance daemon
SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';
 count
---------------------------------------------------------------------
     1
(1 row)

DROP TABLE version_mismatch_table;