Merge pull request #3900 from citusdata/enh/pg13Support

Add PG13 support
pull/4088/head
SaitTalhaNisanci 2020-08-04 23:53:47 +03:00 committed by GitHub
commit 3d1fd08fcf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
137 changed files with 13900 additions and 893 deletions

View File

@ -6,7 +6,7 @@ orbs:
jobs:
build:
docker:
- image: 'citus/extbuilder:latest'
- image: 'citus/extbuilder-13:latest'
steps:
- checkout
- run:
@ -81,6 +81,8 @@ jobs:
- codecov/upload:
flags: 'test_11,multi'
test-11_check-van-mx:
docker:
- image: 'citus/exttester-11:latest'
@ -154,6 +156,18 @@ jobs:
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext --target check-pg-upgrade --old-pg-version 11 --new-pg-version 12'
no_output_timeout: 2m
test-12-13_check-pg-upgrade:
docker:
- image: 'citus/pgupgradetester:latest'
working_directory: /home/circleci/project
steps:
- attach_workspace:
at: .
- run:
name: 'Install and test postgres upgrade'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext --target check-pg-upgrade --old-pg-version 12 --new-pg-version 13'
no_output_timeout: 2m
test-12_check-multi:
docker:
- image: 'citus/exttester-12:latest'
@ -250,6 +264,82 @@ jobs:
install-and-test-ext --target check-citus-upgrade-mixed --citus-pre-tar /install-pg11-citusv8.3.0.tar
no_output_timeout: 2m
test-13_check-multi:
docker:
- image: 'citus/exttester-13:latest'
working_directory: /home/circleci/project
steps:
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-multi)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-multi'
no_output_timeout: 2m
- codecov/upload:
flags: 'test_13,multi'
test-13_check-van-mx:
docker:
- image: 'citus/exttester-13:latest'
working_directory: /home/circleci/project
steps:
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-van-mx)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-vanilla check-multi-mx'
no_output_timeout: 2m
- codecov/upload:
flags: 'test_13,vanilla,mx'
test-13_check-iso-work-fol:
docker:
- image: 'citus/exttester-13:latest'
working_directory: /home/circleci/project
steps:
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-iso-work-fol)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-isolation check-worker'
no_output_timeout: 2m
- codecov/upload:
flags: 'test_13,isolation,worker'
test-13_check-fol:
docker:
- image: 'citus/exttester-13:latest'
working_directory: /home/circleci/project
steps:
- attach_workspace:
at: .
- run:
name: 'Enable core dumps'
command: 'ulimit -c unlimited'
- run:
name: 'Install and Test (fol)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-follower-cluster'
no_output_timeout: 2m
- run:
command: |
mkdir -p /tmp/core_dumps
cp core.* /tmp/core_dumps
when: on_fail
- codecov/upload:
flags: 'test_13,follower'
- store_artifacts:
path: '/tmp/core_dumps'
test-13_check-failure:
docker:
- image: 'citus/failtester-13:latest'
working_directory: /home/circleci/project
steps:
- attach_workspace:
at: .
- run:
name: 'Install and Test (check-failure)'
command: 'chown -R circleci:circleci /home/circleci && install-and-test-ext check-failure'
no_output_timeout: 2m
check-merge-to-enterprise:
docker:
- image: buildpack-deps:stretch
@ -325,8 +415,21 @@ workflows:
- test-12_check-failure:
requires: [build]
- test-13_check-multi:
requires: [build]
- test-13_check-van-mx:
requires: [build]
- test-13_check-iso-work-fol:
requires: [build]
- test-13_check-fol:
requires: [build]
- test-13_check-failure:
requires: [build]
- test-11-12_check-pg-upgrade:
requires: [build]
- test-12-13_check-pg-upgrade:
requires: [build]
- test-11_check-citus-upgrade:
requires: [build]

3
.gitattributes vendored
View File

@ -27,9 +27,10 @@ configure -whitespace
# except these exceptions...
src/backend/distributed/utils/citus_outfuncs.c -citus-style
src/backend/distributed/utils/pg11_snprintf.c -citus-style
src/backend/distributed/deparser/ruleutils_10.c -citus-style
src/backend/distributed/deparser/ruleutils_11.c -citus-style
src/backend/distributed/deparser/ruleutils_12.c -citus-style
src/backend/distributed/deparser/ruleutils_13.c -citus-style
src/include/distributed/citus_nodes.h -citus-style
/vendor/** -citus-style

2
configure vendored
View File

@ -2533,7 +2533,7 @@ if test -z "$version_num"; then
as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5
fi
if test "$version_num" != '11' -a "$version_num" != '12'; then
if test "$version_num" != '11' -a "$version_num" != '12' -a "$version_num" != '13'; then
as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5

View File

@ -571,7 +571,7 @@ ColocationIdForNewTable(Oid relationId, Var *distributionColumn,
*/
Assert(distributionMethod == DISTRIBUTE_BY_HASH);
Relation pgDistColocation = heap_open(DistColocationRelationId(), ExclusiveLock);
Relation pgDistColocation = table_open(DistColocationRelationId(), ExclusiveLock);
Oid distributionColumnType = distributionColumn->vartype;
Oid distributionColumnCollation = get_typcollation(distributionColumnType);
@ -618,12 +618,12 @@ ColocationIdForNewTable(Oid relationId, Var *distributionColumn,
if (createdColocationGroup)
{
/* keep the exclusive lock */
heap_close(pgDistColocation, NoLock);
table_close(pgDistColocation, NoLock);
}
else
{
/* release the exclusive lock */
heap_close(pgDistColocation, ExclusiveLock);
table_close(pgDistColocation, ExclusiveLock);
}
}
@ -1266,7 +1266,7 @@ static void
CopyLocalDataIntoShards(Oid distributedRelationId)
{
/* take an ExclusiveLock to block all operations except SELECT */
Relation distributedRelation = heap_open(distributedRelationId, ExclusiveLock);
Relation distributedRelation = table_open(distributedRelationId, ExclusiveLock);
/*
* Skip copying from partitioned tables, we will copy the data from
@ -1274,7 +1274,7 @@ CopyLocalDataIntoShards(Oid distributedRelationId)
*/
if (PartitionedTable(distributedRelationId))
{
heap_close(distributedRelation, NoLock);
table_close(distributedRelation, NoLock);
return;
}
@ -1330,7 +1330,7 @@ CopyLocalDataIntoShards(Oid distributedRelationId)
/* free memory and close the relation */
ExecDropSingleTupleTableSlot(slot);
FreeExecutorState(estate);
heap_close(distributedRelation, NoLock);
table_close(distributedRelation, NoLock);
PopActiveSnapshot();
}

View File

@ -388,7 +388,7 @@ ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId)
int scanKeyCount = 1;
bool foreignKeyToReferenceTableIncludesGivenColumn = false;
Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_constraint_contype, BTEqualStrategyNumber,
F_CHAREQ, CharGetDatum(CONSTRAINT_FOREIGN));
@ -446,7 +446,7 @@ ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId)
/* clean up scan and close system catalog */
systable_endscan(scanDescriptor);
heap_close(pgConstraint, NoLock);
table_close(pgConstraint, NoLock);
return foreignKeyToReferenceTableIncludesGivenColumn;
}
@ -720,7 +720,7 @@ GetForeignKeyOids(Oid relationId, int flags)
ScanKeyData scanKey[1];
int scanKeyCount = 1;
Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], pgConstraintTargetAttrNumber,
BTEqualStrategyNumber, F_OIDEQ, relationId);
SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, indexOid, useIndex,
@ -770,7 +770,7 @@ GetForeignKeyOids(Oid relationId, int flags)
* on pg_constraint to make sure that caller will process valid foreign key
* constraints through the transaction.
*/
heap_close(pgConstraint, NoLock);
table_close(pgConstraint, NoLock);
return foreignKeyOids;
}

View File

@ -47,6 +47,7 @@
#include "distributed/multi_executor.h"
#include "distributed/namespace_utils.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/version_compat.h"
#include "distributed/worker_create_or_replace.h"
#include "distributed/worker_transaction.h"
#include "nodes/makefuncs.h"
@ -352,7 +353,7 @@ GetFunctionColocationId(Oid functionOid, char *colocateWithTableName,
Oid distributionArgumentOid)
{
int colocationId = INVALID_COLOCATION_ID;
Relation pgDistColocation = heap_open(DistColocationRelationId(), ShareLock);
Relation pgDistColocation = table_open(DistColocationRelationId(), ShareLock);
if (pg_strncasecmp(colocateWithTableName, "default", NAMEDATALEN) == 0)
{
@ -400,7 +401,7 @@ GetFunctionColocationId(Oid functionOid, char *colocateWithTableName,
}
/* keep the lock */
heap_close(pgDistColocation, NoLock);
table_close(pgDistColocation, NoLock);
return colocationId;
}
@ -489,7 +490,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
bool isnull[Natts_pg_dist_object];
bool replace[Natts_pg_dist_object];
Relation pgDistObjectRel = heap_open(DistObjectRelationId(), RowExclusiveLock);
Relation pgDistObjectRel = table_open(DistObjectRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistObjectRel);
/* scan pg_dist_object for classid = $1 AND objid = $2 AND objsubid = $3 via index */
@ -549,7 +550,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
systable_endscan(scanDescriptor);
heap_close(pgDistObjectRel, NoLock);
table_close(pgDistObjectRel, NoLock);
}

View File

@ -138,7 +138,7 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand)
* checked permissions, and will only fail when executing the actual
* index statements.
*/
Relation relation = heap_openrv(createIndexStatement->relation, lockmode);
Relation relation = table_openrv(createIndexStatement->relation, lockmode);
Oid relationId = RelationGetRelid(relation);
bool isCitusRelation = IsCitusTable(relationId);
@ -160,7 +160,7 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand)
relationContext, namespaceName);
}
heap_close(relation, NoLock);
table_close(relation, NoLock);
if (isCitusRelation)
{
@ -246,7 +246,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand)
RangeVarGetRelidExtended(reindexStatement->relation, lockmode, 0,
RangeVarCallbackOwnsTable, NULL);
relation = heap_openrv(reindexStatement->relation, NoLock);
relation = table_openrv(reindexStatement->relation, NoLock);
relationId = RelationGetRelid(relation);
}
@ -275,7 +275,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand)
}
else
{
heap_close(relation, NoLock);
table_close(relation, NoLock);
}
if (isCitusRelation)
@ -426,13 +426,13 @@ PostprocessIndexStmt(Node *node, const char *queryString)
StartTransactionCommand();
/* get the affected relation and index */
Relation relation = heap_openrv(indexStmt->relation, ShareUpdateExclusiveLock);
Relation relation = table_openrv(indexStmt->relation, ShareUpdateExclusiveLock);
Oid indexRelationId = get_relname_relid(indexStmt->idxname,
schemaId);
Relation indexRelation = index_open(indexRelationId, RowExclusiveLock);
/* close relations but retain locks */
heap_close(relation, NoLock);
table_close(relation, NoLock);
index_close(indexRelation, NoLock);
/* mark index as invalid, in-place (cannot be rolled back) */
@ -443,7 +443,7 @@ PostprocessIndexStmt(Node *node, const char *queryString)
StartTransactionCommand();
/* now, update index's validity in a way that can roll back */
Relation pg_index = heap_open(IndexRelationId, RowExclusiveLock);
Relation pg_index = table_open(IndexRelationId, RowExclusiveLock);
HeapTuple indexTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(
indexRelationId));
@ -457,7 +457,7 @@ PostprocessIndexStmt(Node *node, const char *queryString)
/* clean up; index now marked valid, but ROLLBACK will mark invalid */
heap_freetuple(indexTuple);
heap_close(pg_index, RowExclusiveLock);
table_close(pg_index, RowExclusiveLock);
return NIL;
}

View File

@ -34,6 +34,7 @@
#include "distributed/local_executor.h"
#include "distributed/local_multi_copy.h"
#include "distributed/shard_utils.h"
#include "distributed/version_compat.h"
static int ReadFromLocalBufferCallback(void *outBuf, int minRead, int maxRead);
static void AddSlotToBuffer(TupleTableSlot *slot, CitusCopyDestReceiver *copyDest,
@ -160,7 +161,7 @@ DoLocalCopy(StringInfo buffer, Oid relationId, int64 shardId, CopyStmt *copyStat
LocalCopyBuffer = buffer;
Oid shardOid = GetTableLocalShardOid(relationId, shardId);
Relation shard = heap_open(shardOid, RowExclusiveLock);
Relation shard = table_open(shardOid, RowExclusiveLock);
ParseState *pState = make_parsestate(NULL);
/* p_rtable of pState is set so that we can check constraints. */
@ -172,7 +173,7 @@ DoLocalCopy(StringInfo buffer, Oid relationId, int64 shardId, CopyStmt *copyStat
CopyFrom(cstate);
EndCopyFrom(cstate);
heap_close(shard, NoLock);
table_close(shard, NoLock);
free_parsestate(pState);
}

View File

@ -93,10 +93,14 @@
#include "distributed/hash_helpers.h"
#include "executor/executor.h"
#include "foreign/foreign.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#if PG_VERSION_NUM >= PG_VERSION_13
#include "tcop/cmdtag.h"
#endif
#include "tsearch/ts_locale.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
@ -211,8 +215,10 @@ typedef struct ShardConnections
/* Local functions forward declarations */
static void CopyToExistingShards(CopyStmt *copyStatement, char *completionTag);
static void CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId);
static void CopyToExistingShards(CopyStmt *copyStatement,
QueryCompletionCompat *completionTag);
static void CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag,
Oid relationId);
static void OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
ShardConnections *shardConnections, bool
stopOnFailure,
@ -244,7 +250,7 @@ static FmgrInfo * TypeOutputFunctions(uint32 columnCount, Oid *typeIdArray,
bool binaryFormat);
static List * CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist);
static bool CopyStatementHasFormat(CopyStmt *copyStatement, char *formatName);
static void CitusCopyFrom(CopyStmt *copyStatement, char *completionTag);
static void CitusCopyFrom(CopyStmt *copyStatement, QueryCompletionCompat *completionTag);
static HTAB * CreateConnectionStateHash(MemoryContext memoryContext);
static HTAB * CreateShardStateHash(MemoryContext memoryContext);
static CopyConnectionState * GetConnectionState(HTAB *connectionStateHash,
@ -277,7 +283,7 @@ static void UnclaimCopyConnections(List *connectionStateList);
static void ShutdownCopyConnectionState(CopyConnectionState *connectionState,
CitusCopyDestReceiver *copyDest);
static SelectStmt * CitusCopySelect(CopyStmt *copyStatement);
static void CitusCopyTo(CopyStmt *copyStatement, char *completionTag);
static void CitusCopyTo(CopyStmt *copyStatement, QueryCompletionCompat *completionTag);
static int64 ForwardCopyDataFromConnection(CopyOutState copyOutState,
MultiConnection *connection);
@ -313,6 +319,8 @@ static bool CitusCopyDestReceiverReceive(TupleTableSlot *slot,
static void CitusCopyDestReceiverShutdown(DestReceiver *destReceiver);
static void CitusCopyDestReceiverDestroy(DestReceiver *destReceiver);
static bool ContainsLocalPlacement(int64 shardId);
static void CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64
processedRowCount);
static void FinishLocalCopy(CitusCopyDestReceiver *copyDest);
static void CloneCopyOutStateForLocalCopy(CopyOutState from, CopyOutState to);
static bool ShouldExecuteCopyLocally(bool isIntermediateResult);
@ -329,7 +337,7 @@ PG_FUNCTION_INFO_V1(citus_text_send_as_jsonb);
* and the partition method of the distributed table.
*/
static void
CitusCopyFrom(CopyStmt *copyStatement, char *completionTag)
CitusCopyFrom(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)
{
UseCoordinatedTransaction();
@ -385,7 +393,7 @@ CitusCopyFrom(CopyStmt *copyStatement, char *completionTag)
* rows.
*/
static void
CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)
{
Oid tableId = RangeVarGetRelid(copyStatement->relation, NoLock, false);
@ -410,7 +418,7 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
ErrorContextCallback errorCallback;
/* allocate column values and nulls arrays */
Relation distributedRelation = heap_open(tableId, RowExclusiveLock);
Relation distributedRelation = table_open(tableId, RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);
uint32 columnCount = tupleDescriptor->natts;
Datum *columnValues = palloc0(columnCount * sizeof(Datum));
@ -545,7 +553,7 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
ExecDropSingleTupleTableSlot(tupleTableSlot);
FreeExecutorState(executorState);
heap_close(distributedRelation, NoLock);
table_close(distributedRelation, NoLock);
/* mark failed placements as inactive */
MarkFailedShardPlacements();
@ -554,8 +562,7 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
if (completionTag != NULL)
{
SafeSnprintf(completionTag, COMPLETION_TAG_BUFSIZE,
"COPY " UINT64_FORMAT, processedRowCount);
CompleteCopyQueryTagCompat(completionTag, processedRowCount);
}
}
@ -565,10 +572,11 @@ CopyToExistingShards(CopyStmt *copyStatement, char *completionTag)
* tables where we create new shards into which to copy rows.
*/
static void
CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)
CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag, Oid
relationId)
{
/* allocate column values and nulls arrays */
Relation distributedRelation = heap_open(relationId, RowExclusiveLock);
Relation distributedRelation = table_open(relationId, RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);
uint32 columnCount = tupleDescriptor->natts;
Datum *columnValues = palloc0(columnCount * sizeof(Datum));
@ -732,19 +740,30 @@ CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId)
}
EndCopyFrom(copyState);
heap_close(distributedRelation, NoLock);
table_close(distributedRelation, NoLock);
/* check for cancellation one last time before returning */
CHECK_FOR_INTERRUPTS();
if (completionTag != NULL)
{
SafeSnprintf(completionTag, COMPLETION_TAG_BUFSIZE,
"COPY " UINT64_FORMAT, processedRowCount);
CompleteCopyQueryTagCompat(completionTag, processedRowCount);
}
}
static void
CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 processedRowCount)
{
#if PG_VERSION_NUM >= PG_VERSION_13
SetQueryCompletion(completionTag, CMDTAG_COPY, processedRowCount);
#else
SafeSnprintf(completionTag, COMPLETION_TAG_BUFSIZE,
"COPY " UINT64_FORMAT, processedRowCount);
#endif
}
/*
* RemoveOptionFromList removes an option from a list of options in a
* COPY .. WITH (..) statement by name and returns the resulting list.
@ -753,18 +772,20 @@ static List *
RemoveOptionFromList(List *optionList, char *optionName)
{
ListCell *optionCell = NULL;
#if PG_VERSION_NUM < PG_VERSION_13
ListCell *previousCell = NULL;
#endif
foreach(optionCell, optionList)
{
DefElem *option = (DefElem *) lfirst(optionCell);
if (strncmp(option->defname, optionName, NAMEDATALEN) == 0)
{
return list_delete_cell(optionList, optionCell, previousCell);
return list_delete_cell_compat(optionList, optionCell, previousCell);
}
#if PG_VERSION_NUM < PG_VERSION_13
previousCell = optionCell;
#endif
}
return optionList;
@ -1423,7 +1444,7 @@ ColumnCoercionPaths(TupleDesc destTupleDescriptor, TupleDesc inputTupleDescripto
ConversionPathForTypes(inputTupleType, destTupleType,
&coercePaths[columnIndex]);
currentColumnName = lnext(currentColumnName);
currentColumnName = lnext_compat(columnNameList, currentColumnName);
if (currentColumnName == NULL)
{
@ -2136,7 +2157,7 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
const char *nullPrintCharacter = "\\N";
/* look up table properties */
Relation distributedRelation = heap_open(tableId, RowExclusiveLock);
Relation distributedRelation = table_open(tableId, RowExclusiveLock);
CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(tableId);
partitionMethod = cacheEntry->partitionMethod;
@ -2624,7 +2645,7 @@ CitusCopyDestReceiverShutdown(DestReceiver *destReceiver)
}
PG_END_TRY();
heap_close(distributedRelation, NoLock);
table_close(distributedRelation, NoLock);
}
@ -2767,7 +2788,8 @@ CopyStatementHasFormat(CopyStmt *copyStatement, char *formatName)
* further processing is needed.
*/
Node *
ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryString)
ProcessCopyStmt(CopyStmt *copyStatement, QueryCompletionCompat *completionTag, const
char *queryString)
{
/*
* Handle special COPY "resultid" FROM STDIN WITH (format result) commands
@ -2799,9 +2821,9 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
bool isFrom = copyStatement->is_from;
/* consider using RangeVarGetRelidExtended to check perms before locking */
Relation copiedRelation = heap_openrv(copyStatement->relation,
isFrom ? RowExclusiveLock :
AccessShareLock);
Relation copiedRelation = table_openrv(copyStatement->relation,
isFrom ? RowExclusiveLock :
AccessShareLock);
bool isCitusRelation = IsCitusTable(RelationGetRelid(copiedRelation));
@ -2814,7 +2836,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
schemaName = MemoryContextStrdup(relationContext, schemaName);
copyStatement->relation->schemaname = schemaName;
heap_close(copiedRelation, NoLock);
table_close(copiedRelation, NoLock);
if (isCitusRelation)
{
@ -2873,7 +2895,7 @@ CitusCopySelect(CopyStmt *copyStatement)
SelectStmt *selectStmt = makeNode(SelectStmt);
selectStmt->fromClause = list_make1(copyObject(copyStatement->relation));
Relation distributedRelation = heap_openrv(copyStatement->relation, AccessShareLock);
Relation distributedRelation = table_openrv(copyStatement->relation, AccessShareLock);
TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);
List *targetList = NIL;
@ -2903,7 +2925,7 @@ CitusCopySelect(CopyStmt *copyStatement)
targetList = lappend(targetList, selectTarget);
}
heap_close(distributedRelation, NoLock);
table_close(distributedRelation, NoLock);
selectStmt->targetList = targetList;
return selectStmt;
@ -2915,12 +2937,12 @@ CitusCopySelect(CopyStmt *copyStatement)
* table dump.
*/
static void
CitusCopyTo(CopyStmt *copyStatement, char *completionTag)
CitusCopyTo(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)
{
ListCell *shardIntervalCell = NULL;
int64 tuplesSent = 0;
Relation distributedRelation = heap_openrv(copyStatement->relation, AccessShareLock);
Relation distributedRelation = table_openrv(copyStatement->relation, AccessShareLock);
Oid relationId = RelationGetRelid(distributedRelation);
TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);
@ -3002,12 +3024,11 @@ CitusCopyTo(CopyStmt *copyStatement, char *completionTag)
SendCopyEnd(copyOutState);
heap_close(distributedRelation, AccessShareLock);
table_close(distributedRelation, AccessShareLock);
if (completionTag != NULL)
{
SafeSnprintf(completionTag, COMPLETION_TAG_BUFSIZE, "COPY " UINT64_FORMAT,
tuplesSent);
CompleteCopyQueryTagCompat(completionTag, tuplesSent);
}
}
@ -3077,7 +3098,7 @@ CheckCopyPermissions(CopyStmt *copyStatement)
List *attnums;
ListCell *cur;
rel = heap_openrv(copyStatement->relation,
rel = table_openrv(copyStatement->relation,
is_from ? RowExclusiveLock : AccessShareLock);
range_table = CreateRangeTable(rel, required_access);
@ -3103,7 +3124,7 @@ CheckCopyPermissions(CopyStmt *copyStatement)
/* TODO: Perform RLS checks once supported */
heap_close(rel, NoLock);
table_close(rel, NoLock);
/* *INDENT-ON* */
}

View File

@ -33,6 +33,7 @@
#include "distributed/coordinator_protocol.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"
#include "distributed/version_compat.h"
#include "distributed/worker_transaction.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
@ -315,7 +316,7 @@ CreateCreateOrAlterRoleCommand(const char *roleName,
static const char *
ExtractEncryptedPassword(Oid roleOid)
{
Relation pgAuthId = heap_open(AuthIdRelationId, AccessShareLock);
Relation pgAuthId = table_open(AuthIdRelationId, AccessShareLock);
TupleDesc pgAuthIdDescription = RelationGetDescr(pgAuthId);
HeapTuple tuple = SearchSysCache1(AUTHOID, roleOid);
bool isNull = true;
@ -328,7 +329,7 @@ ExtractEncryptedPassword(Oid roleOid)
Datum passwordDatum = heap_getattr(tuple, Anum_pg_authid_rolpassword,
pgAuthIdDescription, &isNull);
heap_close(pgAuthId, AccessShareLock);
table_close(pgAuthId, AccessShareLock);
ReleaseSysCache(tuple);
if (isNull)
@ -527,7 +528,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
List *
GenerateAlterRoleSetCommandForRole(Oid roleid)
{
Relation DbRoleSetting = heap_open(DbRoleSettingRelationId, AccessShareLock);
Relation DbRoleSetting = table_open(DbRoleSettingRelationId, AccessShareLock);
TupleDesc DbRoleSettingDescription = RelationGetDescr(DbRoleSetting);
HeapTuple tuple = NULL;
List *commands = NIL;
@ -561,7 +562,7 @@ GenerateAlterRoleSetCommandForRole(Oid roleid)
}
heap_endscan(scan);
heap_close(DbRoleSetting, AccessShareLock);
table_close(DbRoleSetting, AccessShareLock);
return commands;
}

View File

@ -30,6 +30,7 @@
#include "distributed/resource_lock.h"
#include <distributed/remote_commands.h>
#include <distributed/remote_commands.h>
#include "distributed/version_compat.h"
#include "nodes/parsenodes.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
@ -71,7 +72,7 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString)
continue;
}
pgClass = heap_open(RelationRelationId, AccessShareLock);
pgClass = table_open(RelationRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber,
F_OIDEQ, namespaceOid);
@ -105,7 +106,7 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString)
MarkInvalidateForeignKeyGraph();
systable_endscan(scanDescriptor);
heap_close(pgClass, NoLock);
table_close(pgClass, NoLock);
return NIL;
}
@ -113,7 +114,7 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString)
}
systable_endscan(scanDescriptor);
heap_close(pgClass, NoLock);
table_close(pgClass, NoLock);
}
return NIL;

View File

@ -70,7 +70,7 @@ GetExplicitTriggerIdList(Oid relationId)
{
List *triggerIdList = NIL;
Relation pgTrigger = heap_open(TriggerRelationId, AccessShareLock);
Relation pgTrigger = table_open(TriggerRelationId, AccessShareLock);
int scanKeyCount = 1;
ScanKeyData scanKey[1];
@ -103,7 +103,7 @@ GetExplicitTriggerIdList(Oid relationId)
}
systable_endscan(scanDescriptor);
heap_close(pgTrigger, NoLock);
table_close(pgTrigger, NoLock);
return triggerIdList;
}

View File

@ -64,6 +64,7 @@
#include "distributed/remote_commands.h"
#include "distributed/transaction_management.h"
#include "distributed/worker_create_or_replace.h"
#include "distributed/version_compat.h"
#include "distributed/worker_manager.h"
#include "distributed/worker_transaction.h"
#include "miscadmin.h"
@ -791,7 +792,7 @@ EnumValsList(Oid typeOid)
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(typeOid));
Relation enum_rel = heap_open(EnumRelationId, AccessShareLock);
Relation enum_rel = table_open(EnumRelationId, AccessShareLock);
SysScanDesc enum_scan = systable_beginscan(enum_rel,
EnumTypIdSortOrderIndexId,
true, NULL,
@ -805,7 +806,7 @@ EnumValsList(Oid typeOid)
}
systable_endscan(enum_scan);
heap_close(enum_rel, AccessShareLock);
table_close(enum_rel, AccessShareLock);
return vals;
}

View File

@ -25,6 +25,8 @@
*-------------------------------------------------------------------------
*/
#include "distributed/pg_version_constants.h"
#include "postgres.h"
#include "miscadmin.h"
@ -88,7 +90,8 @@ static bool IsDropSchemaOrDB(Node *parsetree);
*/
void
CitusProcessUtility(Node *node, const char *queryString, ProcessUtilityContext context,
ParamListInfo params, DestReceiver *dest, char *completionTag)
ParamListInfo params, DestReceiver *dest,
QueryCompletionCompat *completionTag)
{
PlannedStmt *plannedStmt = makeNode(PlannedStmt);
plannedStmt->commandType = CMD_UTILITY;
@ -115,7 +118,7 @@ multi_ProcessUtility(PlannedStmt *pstmt,
ParamListInfo params,
struct QueryEnvironment *queryEnv,
DestReceiver *dest,
char *completionTag)
QueryCompletionCompat *completionTag)
{
Node *parsetree = pstmt->utilityStmt;
List *ddlJobs = NIL;

View File

@ -29,6 +29,10 @@
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "postmaster/bgworker_internals.h"
#define VACUUM_PARALLEL_NOTSET -2
/*
* Subset of VacuumParams we care about
@ -40,8 +44,11 @@ typedef struct CitusVacuumParams
VacOptTernaryValue truncate;
VacOptTernaryValue index_cleanup;
#endif
} CitusVacuumParams;
#if PG_VERSION_NUM >= PG_VERSION_13
int nworkers;
#endif
} CitusVacuumParams;
/* Local functions forward declarations for processing distributed table commands */
static bool IsDistributedVacuumStmt(int vacuumOptions, List *vacuumRelationIdList);
@ -284,6 +291,9 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
#if PG_VERSION_NUM >= PG_VERSION_12
&& vacuumParams.truncate == VACOPT_TERNARY_DEFAULT &&
vacuumParams.index_cleanup == VACOPT_TERNARY_DEFAULT
#endif
#if PG_VERSION_NUM >= PG_VERSION_13
&& vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET
#endif
)
{
@ -341,6 +351,13 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
}
#endif
#if PG_VERSION_NUM >= PG_VERSION_13
if (vacuumParams.nworkers != VACUUM_PARALLEL_NOTSET)
{
appendStringInfo(vacuumPrefix, "PARALLEL %d,", vacuumParams.nworkers);
}
#endif
vacuumPrefix->data[vacuumPrefix->len - 1] = ')';
appendStringInfoChar(vacuumPrefix, ' ');
@ -421,6 +438,8 @@ ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
/*
 * This is mostly ExecVacuum from Postgres's commands/vacuum.c.
 * Note that ExecVacuum performs the actual vacuum as well; we don't want
 * that to happen on the coordinator, hence only the option-parsing part
 * is copied here.
 */
static CitusVacuumParams
VacuumStmtParams(VacuumStmt *vacstmt)
@ -436,6 +455,9 @@ VacuumStmtParams(VacuumStmt *vacstmt)
/* Set default value */
params.index_cleanup = VACOPT_TERNARY_DEFAULT;
params.truncate = VACOPT_TERNARY_DEFAULT;
#if PG_VERSION_NUM >= PG_VERSION_13
params.nworkers = VACUUM_PARALLEL_NOTSET;
#endif
/* Parse options list */
DefElem *opt = NULL;
@ -484,6 +506,31 @@ VacuumStmtParams(VacuumStmt *vacstmt)
params.truncate = defGetBoolean(opt) ? VACOPT_TERNARY_ENABLED :
VACOPT_TERNARY_DISABLED;
}
#if PG_VERSION_NUM >= PG_VERSION_13
else if (strcmp(opt->defname, "parallel") == 0)
{
if (opt->arg == NULL)
{
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("parallel option requires a value between 0 and %d",
MAX_PARALLEL_WORKER_LIMIT)));
}
else
{
int nworkers = defGetInt32(opt);
if (nworkers < 0 || nworkers > MAX_PARALLEL_WORKER_LIMIT)
{
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("parallel vacuum degree must be between 0 and %d",
MAX_PARALLEL_WORKER_LIMIT)));
}
params.nworkers = nworkers;
}
}
#endif
else
{
ereport(ERROR,

View File

@ -33,6 +33,8 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "miscadmin.h"
#include "access/hash.h"
@ -45,8 +47,12 @@
#include "distributed/shared_connection_stats.h"
#include "distributed/tuplestore.h"
#include "distributed/worker_manager.h"
#include "utils/hashutils.h"
#include "utils/builtins.h"
#if PG_VERSION_NUM < PG_VERSION_13
#include "utils/hashutils.h"
#else
#include "common/hashfn.h"
#endif
#define RESERVED_CONNECTION_COLUMNS 4

View File

@ -11,6 +11,8 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "access/hash.h"
#include "distributed/colocation_utils.h"
#include "distributed/connection_management.h"
@ -24,6 +26,9 @@
#include "distributed/placement_connection.h"
#include "distributed/relation_access_tracking.h"
#include "utils/hsearch.h"
#if PG_VERSION_NUM >= PG_VERSION_13
#include "common/hashfn.h"
#endif
#include "utils/memutils.h"

View File

@ -13,6 +13,8 @@
#include "postgres.h"
#include "pgstat.h"
#include "distributed/pg_version_constants.h"
#include "libpq-fe.h"
#include "miscadmin.h"
@ -33,8 +35,12 @@
#include "distributed/time_constants.h"
#include "distributed/tuplestore.h"
#include "utils/builtins.h"
#include "utils/hashutils.h"
#if PG_VERSION_NUM < PG_VERSION_13
#include "utils/hsearch.h"
#include "utils/hashutils.h"
#else
#include "common/hashfn.h"
#endif
#include "storage/ipc.h"

View File

@ -37,6 +37,7 @@
#include "commands/defrem.h"
#include "commands/extension.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/listutils.h"
#include "distributed/multi_partitioning_utils.h"
#include "distributed/relay_utility.h"
#include "distributed/metadata_utility.h"
@ -128,7 +129,7 @@ get_extension_schema(Oid ext_oid)
HeapTuple tuple;
ScanKeyData entry[1];
rel = heap_open(ExtensionRelationId, AccessShareLock);
rel = table_open(ExtensionRelationId, AccessShareLock);
ScanKeyInit(&entry[0],
#if PG_VERSION_NUM >= PG_VERSION_12
@ -152,7 +153,7 @@ get_extension_schema(Oid ext_oid)
systable_endscan(scandesc);
heap_close(rel, AccessShareLock);
table_close(rel, AccessShareLock);
return result;
/* *INDENT-ON* */
@ -813,6 +814,15 @@ deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparse
appendStringInfo(buffer, "%s ",
NameListToQuotedString(indexElement->opclass));
}
#if PG_VERSION_NUM >= PG_VERSION_13
/* see Postgres commit 911e70207703799605f5a0e8aad9f06cff067c63 (opclass parameters) */
if (indexElement->opclassopts != NIL)
{
ereport(ERROR, errmsg(
"citus currently doesn't support operator class parameters in indexes"));
}
#endif
if (indexElement->ordering != SORTBY_DEFAULT)
{
@ -1174,7 +1184,7 @@ pg_get_replica_identity_command(Oid tableRelationId)
{
StringInfo buf = makeStringInfo();
Relation relation = heap_open(tableRelationId, AccessShareLock);
Relation relation = table_open(tableRelationId, AccessShareLock);
char replicaIdentity = relation->rd_rel->relreplident;
@ -1202,7 +1212,7 @@ pg_get_replica_identity_command(Oid tableRelationId)
relationName);
}
heap_close(relation, AccessShareLock);
table_close(relation, AccessShareLock);
return (buf->len > 0) ? buf->data : NULL;
}

View File

@ -15,12 +15,14 @@
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_config.h"
#if (PG_VERSION_NUM >= PG_VERSION_11) && (PG_VERSION_NUM < PG_VERSION_12)
#include "postgres.h"
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>

View File

@ -14,12 +14,14 @@
* This needs to be closely in sync with the core code.
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_config.h"
#if (PG_VERSION_NUM >= PG_VERSION_12) && (PG_VERSION_NUM < PG_VERSION_13)
#include "postgres.h"
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>

File diff suppressed because it is too large Load Diff

View File

@ -34,6 +34,7 @@
#include "distributed/shardinterval_utils.h"
#include "distributed/subplan_execution.h"
#include "distributed/transaction_management.h"
#include "distributed/version_compat.h"
#include "executor/executor.h"
#include "nodes/execnodes.h"
#include "nodes/makefuncs.h"
@ -345,9 +346,10 @@ WrapSubquery(Query *subquery)
/* create range table entries */
Alias *selectAlias = makeAlias("citus_insert_select_subquery", NIL);
RangeTblEntry *newRangeTableEntry = addRangeTableEntryForSubquery(pstate, subquery,
selectAlias, false,
true);
RangeTblEntry *newRangeTableEntry = RangeTableEntryFromNSItem(
addRangeTableEntryForSubquery(
pstate, subquery,
selectAlias, false, true));
outerQuery->rtable = list_make1(newRangeTableEntry);
/* set the FROM expression to the subquery */

View File

@ -94,6 +94,7 @@
#include "distributed/relation_access_tracking.h"
#include "distributed/remote_commands.h" /* to access LogRemoteCommands */
#include "distributed/transaction_management.h"
#include "distributed/version_compat.h"
#include "distributed/worker_protocol.h"
#include "executor/tstoreReceiver.h"
#include "executor/tuptable.h"
@ -293,7 +294,7 @@ ExecuteLocalTaskListExtended(List *taskList,
* implemented. So, let planner to call distributed_planner() which
* eventually calls standard_planner().
*/
localPlan = planner(shardQuery, cursorOptions, paramListInfo);
localPlan = planner_compat(shardQuery, cursorOptions, paramListInfo);
}
char *shardQueryString = NULL;
@ -333,7 +334,8 @@ LocallyPlanAndExecuteMultipleQueries(List *queryStrings, TupleDestination *tuple
0);
int cursorOptions = 0;
ParamListInfo paramListInfo = NULL;
PlannedStmt *localPlan = planner(shardQuery, cursorOptions, paramListInfo);
PlannedStmt *localPlan = planner_compat(shardQuery, cursorOptions,
paramListInfo);
totalProcessedRows += ExecuteLocalTaskPlan(localPlan, queryString,
tupleDest, task,
paramListInfo);

View File

@ -32,6 +32,7 @@
#include "distributed/multi_server_executor.h"
#include "distributed/resource_lock.h"
#include "distributed/transaction_management.h"
#include "distributed/version_compat.h"
#include "distributed/worker_shard_visibility.h"
#include "distributed/worker_protocol.h"
#include "executor/execdebug.h"
@ -604,7 +605,7 @@ ExecuteQueryIntoDestReceiver(Query *query, ParamListInfo params, DestReceiver *d
}
/* plan the subquery, this may be another distributed query */
PlannedStmt *queryPlan = pg_plan_query(query, cursorOptions, params);
PlannedStmt *queryPlan = pg_plan_query_compat(query, NULL, cursorOptions, params);
ExecutePlanIntoDestReceiver(queryPlan, params, dest);
}
@ -630,7 +631,7 @@ ExecutePlanIntoDestReceiver(PlannedStmt *queryPlan, ParamListInfo params,
PortalDefineQuery(portal,
NULL,
"",
"SELECT",
CMDTAG_SELECT_COMPAT,
list_make1(queryPlan),
NULL);

View File

@ -26,6 +26,7 @@
#include "distributed/pg_dist_shard.h"
#include "distributed/remote_commands.h"
#include "distributed/tuplestore.h"
#include "distributed/version_compat.h"
#include "distributed/worker_protocol.h"
#include "nodes/makefuncs.h"
#include "nodes/primnodes.h"
@ -258,14 +259,15 @@ StartPortalForQueryExecution(const char *queryString)
Query *query = ParseQueryString(queryString, NULL, 0);
int cursorOptions = CURSOR_OPT_PARALLEL_OK;
PlannedStmt *queryPlan = pg_plan_query(query, cursorOptions, NULL);
PlannedStmt *queryPlan = pg_plan_query_compat(query, NULL, cursorOptions, NULL);
Portal portal = CreateNewPortal();
/* don't display the portal in pg_cursors, it is for internal use only */
portal->visible = false;
PortalDefineQuery(portal, NULL, queryString, "SELECT", list_make1(queryPlan), NULL);
PortalDefineQuery(portal, NULL, queryString, CMDTAG_SELECT_COMPAT,
list_make1(queryPlan), NULL);
int eflags = 0;
PortalStart(portal, NULL, eflags, GetActiveSnapshot());

View File

@ -25,6 +25,7 @@
#include "distributed/metadata/dependency.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_cache.h"
#include "distributed/version_compat.h"
#include "miscadmin.h"
#include "utils/fmgroids.h"
#include "utils/hsearch.h"
@ -304,7 +305,7 @@ DependencyDefinitionFromPgDepend(ObjectAddress target)
/*
* iterate the actual pg_depend catalog
*/
Relation depRel = heap_open(DependRelationId, AccessShareLock);
Relation depRel = table_open(DependRelationId, AccessShareLock);
/* scan pg_depend for classid = $1 AND objid = $2 using pg_depend_depender_index */
ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ,
@ -346,7 +347,7 @@ DependencyDefinitionFromPgShDepend(ObjectAddress target)
/*
* iterate the actual pg_shdepend catalog
*/
Relation shdepRel = heap_open(SharedDependRelationId, AccessShareLock);
Relation shdepRel = table_open(SharedDependRelationId, AccessShareLock);
/*
* Scan pg_shdepend for dbid = $1 AND classid = $2 AND objid = $3 using
@ -621,7 +622,7 @@ IsObjectAddressOwnedByExtension(const ObjectAddress *target,
HeapTuple depTup = NULL;
bool result = false;
Relation depRel = heap_open(DependRelationId, AccessShareLock);
Relation depRel = table_open(DependRelationId, AccessShareLock);
/* scan pg_depend for classid = $1 AND objid = $2 using pg_depend_depender_index */
ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ,
@ -647,7 +648,7 @@ IsObjectAddressOwnedByExtension(const ObjectAddress *target,
}
systable_endscan(depScan);
heap_close(depRel, AccessShareLock);
table_close(depRel, AccessShareLock);
return result;
}

View File

@ -31,6 +31,7 @@
#include "distributed/metadata/distobject.h"
#include "distributed/metadata/pg_dist_object.h"
#include "distributed/metadata_cache.h"
#include "distributed/version_compat.h"
#include "executor/spi.h"
#include "nodes/makefuncs.h"
#include "nodes/pg_list.h"
@ -103,7 +104,7 @@ ObjectExists(const ObjectAddress *address)
if (is_objectclass_supported(address->classId))
{
HeapTuple objtup;
Relation catalog = heap_open(address->classId, AccessShareLock);
Relation catalog = table_open(address->classId, AccessShareLock);
#if PG_VERSION_NUM >= PG_VERSION_12
objtup = get_catalog_object_by_oid(catalog, get_object_attnum_oid(
@ -111,7 +112,7 @@ ObjectExists(const ObjectAddress *address)
#else
objtup = get_catalog_object_by_oid(catalog, address->objectId);
#endif
heap_close(catalog, AccessShareLock);
table_close(catalog, AccessShareLock);
if (objtup != NULL)
{
return true;
@ -257,7 +258,7 @@ IsObjectDistributed(const ObjectAddress *address)
ScanKeyData key[3];
bool result = false;
Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);
Relation pgDistObjectRel = table_open(DistObjectRelationId(), AccessShareLock);
/* scan pg_dist_object for classid = $1 AND objid = $2 AND objsubid = $3 via index */
ScanKeyInit(&key[0], Anum_pg_dist_object_classid, BTEqualStrategyNumber, F_OIDEQ,
@ -295,7 +296,7 @@ ClusterHasDistributedFunctionWithDistArgument(void)
HeapTuple pgDistObjectTup = NULL;
Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);
Relation pgDistObjectRel = table_open(DistObjectRelationId(), AccessShareLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistObjectRel);
@ -340,7 +341,7 @@ GetDistributedObjectAddressList(void)
HeapTuple pgDistObjectTup = NULL;
List *objectAddressList = NIL;
Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);
Relation pgDistObjectRel = table_open(DistObjectRelationId(), AccessShareLock);
SysScanDesc pgDistObjectScan = systable_beginscan(pgDistObjectRel, InvalidOid, false,
NULL, 0,
NULL);

View File

@ -65,6 +65,9 @@
#include "utils/datum.h"
#include "utils/elog.h"
#include "utils/hsearch.h"
#if PG_VERSION_NUM >= PG_VERSION_13
#include "common/hashfn.h"
#endif
#include "utils/inval.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
@ -323,7 +326,7 @@ IsCitusTableViaCatalog(Oid relationId)
ScanKeyData scanKey[1];
bool indexOK = true;
Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock);
Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid,
BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId));
@ -334,7 +337,7 @@ IsCitusTableViaCatalog(Oid relationId)
HeapTuple partitionTuple = systable_getnext(scanDescriptor);
systable_endscan(scanDescriptor);
heap_close(pgDistPartition, AccessShareLock);
table_close(pgDistPartition, AccessShareLock);
return HeapTupleIsValid(partitionTuple);
}
@ -1006,7 +1009,7 @@ LookupDistObjectCacheEntry(Oid classid, Oid objid, int32 objsubid)
cacheEntry->key.objid = objid;
cacheEntry->key.objsubid = objsubid;
Relation pgDistObjectRel = heap_open(DistObjectRelationId(), AccessShareLock);
Relation pgDistObjectRel = table_open(DistObjectRelationId(), AccessShareLock);
TupleDesc pgDistObjectTupleDesc = RelationGetDescr(pgDistObjectRel);
ScanKeyInit(&pgDistObjectKey[0], Anum_pg_dist_object_classid,
@ -1059,14 +1062,14 @@ LookupDistObjectCacheEntry(Oid classid, Oid objid, int32 objsubid)
static CitusTableCacheEntry *
BuildCitusTableCacheEntry(Oid relationId)
{
Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock);
Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
HeapTuple distPartitionTuple =
LookupDistPartitionTuple(pgDistPartition, relationId);
if (distPartitionTuple == NULL)
{
/* not a distributed table, done */
heap_close(pgDistPartition, NoLock);
table_close(pgDistPartition, NoLock);
return NULL;
}
@ -1166,7 +1169,7 @@ BuildCitusTableCacheEntry(Oid relationId)
MemoryContextSwitchTo(oldContext);
heap_close(pgDistPartition, NoLock);
table_close(pgDistPartition, NoLock);
cacheEntry->isValid = true;
@ -1201,7 +1204,7 @@ BuildCachedShardList(CitusTableCacheEntry *cacheEntry)
int shardIntervalArrayLength = list_length(distShardTupleList);
if (shardIntervalArrayLength > 0)
{
Relation distShardRelation = heap_open(DistShardRelationId(), AccessShareLock);
Relation distShardRelation = table_open(DistShardRelationId(), AccessShareLock);
TupleDesc distShardTupleDesc = RelationGetDescr(distShardRelation);
int arrayIndex = 0;
@ -1236,7 +1239,7 @@ BuildCachedShardList(CitusTableCacheEntry *cacheEntry)
arrayIndex++;
}
heap_close(distShardRelation, AccessShareLock);
table_close(distShardRelation, AccessShareLock);
}
/* look up value comparison function */
@ -1847,7 +1850,7 @@ InstalledExtensionVersion(void)
InitializeCaches();
Relation relation = heap_open(ExtensionRelationId, AccessShareLock);
Relation relation = table_open(ExtensionRelationId, AccessShareLock);
ScanKeyInit(&entry[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ,
CStringGetDatum("citus"));
@ -1889,7 +1892,7 @@ InstalledExtensionVersion(void)
systable_endscan(scandesc);
heap_close(relation, AccessShareLock);
table_close(relation, AccessShareLock);
return installedExtensionVersion;
}
@ -2400,7 +2403,7 @@ CitusExtensionOwner(void)
return MetadataCache.extensionOwner;
}
Relation relation = heap_open(ExtensionRelationId, AccessShareLock);
Relation relation = table_open(ExtensionRelationId, AccessShareLock);
ScanKeyInit(&entry[0],
Anum_pg_extension_extname,
@ -2440,7 +2443,7 @@ CitusExtensionOwner(void)
systable_endscan(scandesc);
heap_close(relation, AccessShareLock);
table_close(relation, AccessShareLock);
return MetadataCache.extensionOwner;
}
@ -3228,7 +3231,7 @@ GetLocalGroupId(void)
return 0;
}
Relation pgDistLocalGroupId = heap_open(localGroupTableOid, AccessShareLock);
Relation pgDistLocalGroupId = table_open(localGroupTableOid, AccessShareLock);
SysScanDesc scanDescriptor = systable_beginscan(pgDistLocalGroupId,
InvalidOid, false,
@ -3260,7 +3263,7 @@ GetLocalGroupId(void)
}
systable_endscan(scanDescriptor);
heap_close(pgDistLocalGroupId, AccessShareLock);
table_close(pgDistLocalGroupId, AccessShareLock);
return groupId;
}
@ -3671,7 +3674,7 @@ DistTableOidList(void)
int scanKeyCount = 0;
List *distTableOidList = NIL;
Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock);
Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition,
InvalidOid, false,
@ -3693,7 +3696,7 @@ DistTableOidList(void)
}
systable_endscan(scanDescriptor);
heap_close(pgDistPartition, AccessShareLock);
table_close(pgDistPartition, AccessShareLock);
return distTableOidList;
}
@ -3713,7 +3716,7 @@ ReferenceTableOidList()
int scanKeyCount = 0;
List *referenceTableOidList = NIL;
Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock);
Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition,
InvalidOid, false,
@ -3742,7 +3745,7 @@ ReferenceTableOidList()
}
systable_endscan(scanDescriptor);
heap_close(pgDistPartition, AccessShareLock);
table_close(pgDistPartition, AccessShareLock);
return referenceTableOidList;
}
@ -3856,7 +3859,7 @@ LookupDistShardTuples(Oid relationId)
List *distShardTupleList = NIL;
ScanKeyData scanKey[1];
Relation pgDistShard = heap_open(DistShardRelationId(), AccessShareLock);
Relation pgDistShard = table_open(DistShardRelationId(), AccessShareLock);
/* copy scankey to local copy, it will be modified during the scan */
scanKey[0] = DistShardScanKey[0];
@ -3878,7 +3881,7 @@ LookupDistShardTuples(Oid relationId)
}
systable_endscan(scanDescriptor);
heap_close(pgDistShard, AccessShareLock);
table_close(pgDistShard, AccessShareLock);
return distShardTupleList;
}
@ -3896,7 +3899,7 @@ LookupShardRelationFromCatalog(int64 shardId, bool missingOk)
ScanKeyData scanKey[1];
int scanKeyCount = 1;
Form_pg_dist_shard shardForm = NULL;
Relation pgDistShard = heap_open(DistShardRelationId(), AccessShareLock);
Relation pgDistShard = table_open(DistShardRelationId(), AccessShareLock);
Oid relationId = InvalidOid;
ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid,
@ -3924,7 +3927,7 @@ LookupShardRelationFromCatalog(int64 shardId, bool missingOk)
}
systable_endscan(scanDescriptor);
heap_close(pgDistShard, NoLock);
table_close(pgDistShard, NoLock);
return relationId;
}
@ -4206,7 +4209,7 @@ CitusInvalidateRelcacheByShardId(int64 shardId)
ScanKeyData scanKey[1];
int scanKeyCount = 1;
Form_pg_dist_shard shardForm = NULL;
Relation pgDistShard = heap_open(DistShardRelationId(), AccessShareLock);
Relation pgDistShard = table_open(DistShardRelationId(), AccessShareLock);
/*
* Load shard, to find the associated relation id. Can't use
@ -4249,7 +4252,7 @@ CitusInvalidateRelcacheByShardId(int64 shardId)
}
systable_endscan(scanDescriptor);
heap_close(pgDistShard, NoLock);
table_close(pgDistShard, NoLock);
/* bump command counter, to force invalidation to take effect */
CommandCounterIncrement();
@ -4274,7 +4277,7 @@ DistNodeMetadata(void)
ereport(ERROR, (errmsg("pg_dist_node_metadata was not found")));
}
Relation pgDistNodeMetadata = heap_open(metadataTableOid, AccessShareLock);
Relation pgDistNodeMetadata = table_open(metadataTableOid, AccessShareLock);
SysScanDesc scanDescriptor = systable_beginscan(pgDistNodeMetadata,
InvalidOid, false,
NULL, scanKeyCount, scanKey);
@ -4295,7 +4298,7 @@ DistNodeMetadata(void)
}
systable_endscan(scanDescriptor);
heap_close(pgDistNodeMetadata, AccessShareLock);
table_close(pgDistNodeMetadata, AccessShareLock);
return metadata;
}

View File

@ -989,7 +989,7 @@ UpdateDistNodeBoolAttr(const char *nodeName, int32 nodePort, int attrNum, bool v
bool isnull[Natts_pg_dist_node];
bool replace[Natts_pg_dist_node];
Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
Relation pgDistNode = table_open(DistNodeRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);
ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodename,
@ -1022,7 +1022,7 @@ UpdateDistNodeBoolAttr(const char *nodeName, int32 nodePort, int attrNum, bool v
CommandCounterIncrement();
systable_endscan(scanDescriptor);
heap_close(pgDistNode, NoLock);
table_close(pgDistNode, NoLock);
}
@ -1038,7 +1038,7 @@ List *
SequenceDDLCommandsForTable(Oid relationId)
{
List *sequenceDDLList = NIL;
List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber);
List *ownedSequences = getOwnedSequencesCompat(relationId, InvalidAttrNumber);
char *ownerName = TableOwner(relationId);
Oid sequenceOid = InvalidOid;

View File

@ -190,7 +190,7 @@ DistributedTableSize(Oid relationId, char *sizeQuery)
totalRelationSize += relationSizeOnNode;
}
heap_close(relation, AccessShareLock);
table_close(relation, AccessShareLock);
return totalRelationSize;
}
@ -633,8 +633,8 @@ NodeGroupHasShardPlacements(int32 groupId, bool onlyConsiderActivePlacements)
ScanKeyData scanKey[2];
Relation pgPlacement = heap_open(DistPlacementRelationId(),
AccessShareLock);
Relation pgPlacement = table_open(DistPlacementRelationId(),
AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_groupid,
BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(groupId));
@ -654,7 +654,7 @@ NodeGroupHasShardPlacements(int32 groupId, bool onlyConsiderActivePlacements)
bool hasActivePlacements = HeapTupleIsValid(heapTuple);
systable_endscan(scanDescriptor);
heap_close(pgPlacement, NoLock);
table_close(pgPlacement, NoLock);
return hasActivePlacements;
}
@ -731,7 +731,7 @@ BuildShardPlacementList(ShardInterval *shardInterval)
int scanKeyCount = 1;
bool indexOK = true;
Relation pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock);
Relation pgPlacement = table_open(DistPlacementRelationId(), AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_shardid,
BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId));
@ -755,7 +755,7 @@ BuildShardPlacementList(ShardInterval *shardInterval)
}
systable_endscan(scanDescriptor);
heap_close(pgPlacement, NoLock);
table_close(pgPlacement, NoLock);
return shardPlacementList;
}
@ -774,7 +774,7 @@ AllShardPlacementsOnNodeGroup(int32 groupId)
int scanKeyCount = 1;
bool indexOK = true;
Relation pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock);
Relation pgPlacement = table_open(DistPlacementRelationId(), AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_groupid,
BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(groupId));
@ -798,7 +798,7 @@ AllShardPlacementsOnNodeGroup(int32 groupId)
}
systable_endscan(scanDescriptor);
heap_close(pgPlacement, NoLock);
table_close(pgPlacement, NoLock);
return shardPlacementList;
}
@ -879,7 +879,7 @@ InsertShardRow(Oid relationId, uint64 shardId, char storageType,
}
/* open shard relation and insert new tuple */
Relation pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock);
Relation pgDistShard = table_open(DistShardRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistShard);
HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
@ -890,7 +890,7 @@ InsertShardRow(Oid relationId, uint64 shardId, char storageType,
CitusInvalidateRelcacheByRelid(relationId);
CommandCounterIncrement();
heap_close(pgDistShard, NoLock);
table_close(pgDistShard, NoLock);
}
@ -923,7 +923,7 @@ InsertShardPlacementRow(uint64 shardId, uint64 placementId,
values[Anum_pg_dist_placement_groupid - 1] = Int32GetDatum(groupId);
/* open shard placement relation and insert new tuple */
Relation pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock);
Relation pgDistPlacement = table_open(DistPlacementRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement);
HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
@ -933,7 +933,7 @@ InsertShardPlacementRow(uint64 shardId, uint64 placementId,
CitusInvalidateRelcacheByShardId(shardId);
CommandCounterIncrement();
heap_close(pgDistPlacement, NoLock);
table_close(pgDistPlacement, NoLock);
return placementId;
}
@ -953,7 +953,7 @@ InsertIntoPgDistPartition(Oid relationId, char distributionMethod,
bool newNulls[Natts_pg_dist_partition];
/* open system catalog and insert new tuple */
Relation pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock);
Relation pgDistPartition = table_open(DistPartitionRelationId(), RowExclusiveLock);
/* form new tuple for pg_dist_partition */
memset(newValues, 0, sizeof(newValues));
@ -991,7 +991,7 @@ InsertIntoPgDistPartition(Oid relationId, char distributionMethod,
RecordDistributedRelationDependencies(relationId);
CommandCounterIncrement();
heap_close(pgDistPartition, NoLock);
table_close(pgDistPartition, NoLock);
}
@ -1038,7 +1038,7 @@ DeletePartitionRow(Oid distributedRelationId)
ScanKeyData scanKey[1];
int scanKeyCount = 1;
Relation pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock);
Relation pgDistPartition = table_open(DistPartitionRelationId(), RowExclusiveLock);
ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid,
BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(distributedRelationId));
@ -1064,7 +1064,7 @@ DeletePartitionRow(Oid distributedRelationId)
/* increment the counter so that next command can see the row */
CommandCounterIncrement();
heap_close(pgDistPartition, NoLock);
table_close(pgDistPartition, NoLock);
}
@ -1079,7 +1079,7 @@ DeleteShardRow(uint64 shardId)
int scanKeyCount = 1;
bool indexOK = true;
Relation pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock);
Relation pgDistShard = table_open(DistShardRelationId(), RowExclusiveLock);
ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid,
BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId));
@ -1106,7 +1106,7 @@ DeleteShardRow(uint64 shardId)
CitusInvalidateRelcacheByRelid(distributedRelationId);
CommandCounterIncrement();
heap_close(pgDistShard, NoLock);
table_close(pgDistShard, NoLock);
}
@ -1122,7 +1122,7 @@ DeleteShardPlacementRow(uint64 placementId)
bool indexOK = true;
bool isNull = false;
Relation pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock);
Relation pgDistPlacement = table_open(DistPlacementRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement);
ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_placementid,
@ -1154,7 +1154,7 @@ DeleteShardPlacementRow(uint64 placementId)
CitusInvalidateRelcacheByShardId(shardId);
CommandCounterIncrement();
heap_close(pgDistPlacement, NoLock);
table_close(pgDistPlacement, NoLock);
}
@ -1251,7 +1251,7 @@ UpdateShardPlacementState(uint64 placementId, char shardState)
bool replace[Natts_pg_dist_placement];
bool colIsNull = false;
Relation pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock);
Relation pgDistPlacement = table_open(DistPlacementRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement);
ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_placementid,
BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(placementId));
@ -1288,7 +1288,7 @@ UpdateShardPlacementState(uint64 placementId, char shardState)
CommandCounterIncrement();
systable_endscan(scanDescriptor);
heap_close(pgDistPlacement, NoLock);
table_close(pgDistPlacement, NoLock);
}

View File

@ -41,6 +41,7 @@
#include "distributed/resource_lock.h"
#include "distributed/shardinterval_utils.h"
#include "distributed/shared_connection_stats.h"
#include "distributed/version_compat.h"
#include "distributed/worker_manager.h"
#include "distributed/worker_transaction.h"
#include "lib/stringinfo.h"
@ -798,7 +799,7 @@ UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort)
bool isnull[Natts_pg_dist_node];
bool replace[Natts_pg_dist_node];
Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
Relation pgDistNode = table_open(DistNodeRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);
ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodeid,
@ -834,7 +835,7 @@ UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort)
CommandCounterIncrement();
systable_endscan(scanDescriptor);
heap_close(pgDistNode, NoLock);
table_close(pgDistNode, NoLock);
}
@ -978,7 +979,7 @@ FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePort)
{
WorkerNode *workerNode = NULL;
Relation pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock);
Relation pgDistNode = table_open(DistNodeRelationId(), AccessShareLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);
HeapTuple heapTuple = GetNodeTuple(nodeName, nodePort);
@ -987,7 +988,7 @@ FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePort)
workerNode = TupleToWorkerNode(tupleDescriptor, heapTuple);
}
heap_close(pgDistNode, NoLock);
table_close(pgDistNode, NoLock);
return workerNode;
}
@ -1007,7 +1008,7 @@ ReadDistNode(bool includeNodesFromOtherClusters)
int scanKeyCount = 0;
List *workerNodeList = NIL;
Relation pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock);
Relation pgDistNode = table_open(DistNodeRelationId(), AccessShareLock);
SysScanDesc scanDescriptor = systable_beginscan(pgDistNode,
InvalidOid, false,
@ -1031,7 +1032,7 @@ ReadDistNode(bool includeNodesFromOtherClusters)
}
systable_endscan(scanDescriptor);
heap_close(pgDistNode, NoLock);
table_close(pgDistNode, NoLock);
return workerNodeList;
}
@ -1208,7 +1209,7 @@ AddNodeMetadata(char *nodeName, int32 nodePort,
static WorkerNode *
SetWorkerColumn(WorkerNode *workerNode, int columnIndex, Datum value)
{
Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
Relation pgDistNode = table_open(DistNodeRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);
HeapTuple heapTuple = GetNodeTuple(workerNode->workerName, workerNode->workerPort);
@ -1261,7 +1262,7 @@ SetWorkerColumn(WorkerNode *workerNode, int columnIndex, Datum value)
WorkerNode *newWorkerNode = TupleToWorkerNode(tupleDescriptor, heapTuple);
heap_close(pgDistNode, NoLock);
table_close(pgDistNode, NoLock);
/* we also update the column at worker nodes */
SendCommandToWorkersWithMetadata(metadataSyncCommand);
@ -1305,7 +1306,7 @@ SetNodeState(char *nodeName, int nodePort, bool isActive)
static HeapTuple
GetNodeTuple(const char *nodeName, int32 nodePort)
{
Relation pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock);
Relation pgDistNode = table_open(DistNodeRelationId(), AccessShareLock);
const int scanKeyCount = 2;
const bool indexOK = false;
@ -1326,7 +1327,7 @@ GetNodeTuple(const char *nodeName, int32 nodePort)
}
systable_endscan(scanDescriptor);
heap_close(pgDistNode, NoLock);
table_close(pgDistNode, NoLock);
return nodeTuple;
}
@ -1448,7 +1449,7 @@ InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, NodeMetadata *nodeMeta
values[Anum_pg_dist_node_shouldhaveshards - 1] = BoolGetDatum(
nodeMetadata->shouldHaveShards);
Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
Relation pgDistNode = table_open(DistNodeRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode);
HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
@ -1461,7 +1462,7 @@ InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, NodeMetadata *nodeMeta
CommandCounterIncrement();
/* close relation */
heap_close(pgDistNode, NoLock);
table_close(pgDistNode, NoLock);
}
@ -1475,7 +1476,7 @@ DeleteNodeRow(char *nodeName, int32 nodePort)
bool indexOK = false;
ScanKeyData scanKey[2];
Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock);
Relation pgDistNode = table_open(DistNodeRelationId(), RowExclusiveLock);
/*
* simple_heap_delete() expects that the caller has at least an
@ -1510,8 +1511,8 @@ DeleteNodeRow(char *nodeName, int32 nodePort)
/* increment the counter so that next command won't see the row */
CommandCounterIncrement();
heap_close(replicaIndex, AccessShareLock);
heap_close(pgDistNode, NoLock);
table_close(replicaIndex, AccessShareLock);
table_close(pgDistNode, NoLock);
}
@ -1628,7 +1629,7 @@ UnsetMetadataSyncedForAll(void)
* pg_dist_node in different orders. To protect against deadlock, we
* get an exclusive lock here.
*/
Relation relation = heap_open(DistNodeRelationId(), ExclusiveLock);
Relation relation = table_open(DistNodeRelationId(), ExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(relation);
ScanKeyInit(&scanKey[0], Anum_pg_dist_node_hasmetadata,
BTEqualStrategyNumber, F_BOOLEQ, BoolGetDatum(true));
@ -1676,7 +1677,7 @@ UnsetMetadataSyncedForAll(void)
systable_endscan(scanDescriptor);
CatalogCloseIndexes(indstate);
heap_close(relation, NoLock);
table_close(relation, NoLock);
return updatedAtLeastOne;
}

View File

@ -48,6 +48,7 @@
#include "distributed/metadata_sync.h"
#include "distributed/namespace_utils.h"
#include "distributed/pg_dist_shard.h"
#include "distributed/version_compat.h"
#include "distributed/worker_manager.h"
#include "foreign/foreign.h"
#include "lib/stringinfo.h"
@ -75,6 +76,7 @@ int NextPlacementId = 0;
static List * GetTableReplicaIdentityCommand(Oid relationId);
static Datum WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor);
/* exports for SQL callable functions */
PG_FUNCTION_INFO_V1(master_get_table_metadata);
PG_FUNCTION_INFO_V1(master_get_table_ddl_events);
@ -221,8 +223,10 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS)
/* allocate DDL statements, and then save position in DDL statements */
List *tableDDLEventList = GetTableDDLEvents(relationId, includeSequenceDefaults);
tableDDLEventCell = list_head(tableDDLEventList);
functionContext->user_fctx = tableDDLEventCell;
ListCellAndListWrapper *wrapper = palloc0(sizeof(ListCellAndListWrapper));
wrapper->list = tableDDLEventList;
wrapper->listCell = tableDDLEventCell;
functionContext->user_fctx = wrapper;
MemoryContextSwitchTo(oldContext);
}
@ -235,13 +239,14 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS)
*/
functionContext = SRF_PERCALL_SETUP();
tableDDLEventCell = (ListCell *) functionContext->user_fctx;
if (tableDDLEventCell != NULL)
ListCellAndListWrapper *wrapper =
(ListCellAndListWrapper *) functionContext->user_fctx;
if (wrapper->listCell != NULL)
{
char *ddlStatement = (char *) lfirst(tableDDLEventCell);
char *ddlStatement = (char *) lfirst(wrapper->listCell);
text *ddlStatementText = cstring_to_text(ddlStatement);
functionContext->user_fctx = lnext(tableDDLEventCell);
wrapper->listCell = lnext_compat(wrapper->list, wrapper->listCell);
SRF_RETURN_NEXT(functionContext, PointerGetDatum(ddlStatementText));
}
@ -645,7 +650,7 @@ GetTableIndexAndConstraintCommands(Oid relationId)
PushOverrideEmptySearchPath(CurrentMemoryContext);
/* open system catalog and scan all indexes that belong to this table */
Relation pgIndex = heap_open(IndexRelationId, AccessShareLock);
Relation pgIndex = table_open(IndexRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_index_indrelid,
BTEqualStrategyNumber, F_OIDEQ, relationId);
@ -693,7 +698,7 @@ GetTableIndexAndConstraintCommands(Oid relationId)
/* clean up scan and close system catalog */
systable_endscan(scanDescriptor);
heap_close(pgIndex, AccessShareLock);
table_close(pgIndex, AccessShareLock);
/* revert back to original search_path */
PopOverrideSearchPath();

View File

@ -93,7 +93,7 @@ RemoteScanTargetList(List *workerTargetList)
Var *remoteScanColumn = makeVarFromTargetEntry(tableId, workerTargetEntry);
remoteScanColumn->varattno = columnId;
remoteScanColumn->varoattno = columnId;
remoteScanColumn->varattnosyn = columnId;
columnId++;
if (remoteScanColumn->vartype == RECORDOID || remoteScanColumn->vartype ==
@ -295,7 +295,7 @@ BuildSelectStatementViaStdPlanner(Query *combineQuery, List *remoteScanTargetLis
ReplaceCitusExtraDataContainer = true;
ReplaceCitusExtraDataContainerWithCustomScan = remoteScan;
standardStmt = standard_planner(combineQuery, 0, NULL);
standardStmt = standard_planner_compat(combineQuery, 0, NULL);
ReplaceCitusExtraDataContainer = false;
ReplaceCitusExtraDataContainerWithCustomScan = NULL;

View File

@ -358,7 +358,7 @@ UpdateRelationsToLocalShardTables(Node *node, List *relationShardList)
static void
ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte)
{
Relation relation = heap_open(rte->relid, NoLock);
Relation relation = table_open(rte->relid, NoLock);
TupleDesc tupleDescriptor = RelationGetDescr(relation);
int columnCount = tupleDescriptor->natts;
List *targetList = NIL;
@ -388,7 +388,7 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte)
targetList = lappend(targetList, targetEntry);
}
heap_close(relation, NoLock);
table_close(relation, NoLock);
FromExpr *joinTree = makeNode(FromExpr);
joinTree->quals = makeBoolConst(false, false);

View File

@ -127,7 +127,12 @@ static PlannedStmt * PlanDistributedStmt(DistributedPlanningContext *planContext
/* Distributed planner hook */
PlannedStmt *
distributed_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
distributed_planner(Query *parse,
#if PG_VERSION_NUM >= PG_VERSION_13
const char *query_string,
#endif
int cursorOptions,
ParamListInfo boundParams)
{
bool needsDistributedPlanning = false;
bool fastPathRouterQuery = false;
@ -217,9 +222,9 @@ distributed_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
* restriction information per table and parse tree transformations made by
* postgres' planner.
*/
planContext.plan = standard_planner(planContext.query,
planContext.cursorOptions,
planContext.boundParams);
planContext.plan = standard_planner_compat(planContext.query,
planContext.cursorOptions,
planContext.boundParams);
if (needsDistributedPlanning)
{
result = PlanDistributedStmt(&planContext, rteIdCounter);
@ -1047,7 +1052,7 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi
* being contiguous.
*/
standard_planner(newQuery, 0, boundParams);
standard_planner_compat(newQuery, 0, boundParams);
/* overwrite the old transformed query with the new transformed query */
*query = *newQuery;

View File

@ -514,7 +514,7 @@ CreateTargetListForCombineQuery(List *targetList)
Var *column = makeVarFromTargetEntry(masterTableId, originalTargetEntry);
column->varattno = columnId;
column->varoattno = columnId;
column->varattnosyn = columnId;
columnId++;
if (column->vartype == RECORDOID || column->vartype == RECORDARRAYOID)
@ -1388,8 +1388,8 @@ CreateNonPushableInsertSelectPlan(uint64 planId, Query *parse, ParamListInfo bou
/* plan the subquery, this may be another distributed query */
int cursorOptions = CURSOR_OPT_PARALLEL_OK;
PlannedStmt *selectPlan = pg_plan_query(selectQueryCopy, cursorOptions,
boundParams);
PlannedStmt *selectPlan = pg_plan_query_compat(selectQueryCopy, NULL, cursorOptions,
boundParams);
bool repartitioned = IsRedistributablePlan(selectPlan->planTree) &&
IsSupportedRedistributionTarget(targetRelationId);
@ -1494,7 +1494,7 @@ AddInsertSelectCasts(List *insertTargetList, List *selectTargetList,
*/
Assert(list_length(insertTargetList) <= list_length(selectTargetList));
Relation distributedRelation = heap_open(targetRelationId, RowExclusiveLock);
Relation distributedRelation = table_open(targetRelationId, RowExclusiveLock);
TupleDesc destTupleDescriptor = RelationGetDescr(distributedRelation);
int targetEntryIndex = 0;
@ -1579,7 +1579,7 @@ AddInsertSelectCasts(List *insertTargetList, List *selectTargetList,
selectTargetEntry->resno = entryResNo++;
}
heap_close(distributedRelation, NoLock);
table_close(distributedRelation, NoLock);
return selectTargetList;
}

View File

@ -21,6 +21,9 @@
#include "distributed/query_utils.h"
#include "distributed/worker_manager.h"
#include "utils/builtins.h"
#if PG_VERSION_NUM >= PG_VERSION_13
#include "common/hashfn.h"
#endif
/* controlled via GUC, used mostly for testing */
bool LogIntermediateResults = false;
@ -370,18 +373,20 @@ RemoveLocalNodeFromWorkerList(List *workerNodeList)
int32 localGroupId = GetLocalGroupId();
ListCell *workerNodeCell = NULL;
#if PG_VERSION_NUM < PG_VERSION_13
ListCell *prev = NULL;
#endif
foreach(workerNodeCell, workerNodeList)
{
WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
if (workerNode->groupId == localGroupId)
{
return list_delete_cell(workerNodeList, workerNodeCell, prev);
return list_delete_cell_compat(workerNodeList, workerNodeCell, prev);
}
prev = workerNodeCell;
}
#if PG_VERSION_NUM < PG_VERSION_13
prev = workerNodeCell;
#endif
return workerNodeList;
}

View File

@ -17,6 +17,7 @@
#include "distributed/deparse_shard_query.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/metadata_cache.h"
#include "distributed/version_compat.h"
#if PG_VERSION_NUM >= PG_VERSION_12
#include "optimizer/optimizer.h"
#else
@ -89,7 +90,7 @@ CacheLocalPlanForShardQuery(Task *task, DistributedPlan *originalDistributedPlan
LockRelationOid(rangeTableEntry->relid, lockMode);
LocalPlannedStatement *localPlannedStatement = CitusMakeNode(LocalPlannedStatement);
localPlan = planner(shardQuery, 0, NULL);
localPlan = planner_compat(shardQuery, 0, NULL);
localPlannedStatement->localPlan = localPlan;
localPlannedStatement->shardId = task->anchorShardId;
localPlannedStatement->localGroupId = GetLocalGroupId();

View File

@ -221,6 +221,7 @@ NonPushableInsertSelectExplainScan(CustomScanState *node, List *ancestors,
bool repartition = distributedPlan->insertSelectMethod == INSERT_SELECT_REPARTITION;
if (es->analyze)
{
ereport(ERROR, (errmsg("EXPLAIN ANALYZE is currently not supported for INSERT "
@ -271,7 +272,16 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
ParamListInfo params = NULL;
char *queryString = NULL;
instr_time planduration;
#if PG_VERSION_NUM >= PG_VERSION_13
BufferUsage bufusage_start,
bufusage;
if (es->buffers)
{
bufusage_start = pgBufferUsage;
}
#endif
if (es->format == EXPLAIN_FORMAT_TEXT)
{
char *resultId = GenerateResultId(planId, subPlan->subPlanId);
@ -313,7 +323,18 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es)
INSTR_TIME_SET_ZERO(planduration);
ExplainOnePlan(plan, into, es, queryString, params, NULL, &planduration);
#if PG_VERSION_NUM >= PG_VERSION_13
/* calc differences of buffer counters. */
if (es->buffers)
{
memset(&bufusage, 0, sizeof(BufferUsage));
BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
}
#endif
ExplainOnePlanCompat(plan, into, es, queryString, params, NULL, &planduration,
(es->buffers ? &bufusage : NULL));
if (es->format == EXPLAIN_FORMAT_TEXT)
{
@ -961,7 +982,7 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
INSTR_TIME_SET_CURRENT(planStart);
PlannedStmt *plan = pg_plan_query(query, 0, NULL);
PlannedStmt *plan = pg_plan_query_compat(query, NULL, 0, NULL);
INSTR_TIME_SET_CURRENT(planDuration);
INSTR_TIME_SUBTRACT(planDuration, planStart);
@ -1122,18 +1143,37 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into,
/* rest is copied from ExplainOneQuery() */
instr_time planstart,
planduration;
#if PG_VERSION_NUM >= PG_VERSION_13
BufferUsage bufusage_start,
bufusage;
if (es->buffers)
{
bufusage_start = pgBufferUsage;
}
#endif
INSTR_TIME_SET_CURRENT(planstart);
/* plan the query */
PlannedStmt *plan = pg_plan_query(query, cursorOptions, params);
PlannedStmt *plan = pg_plan_query_compat(query, NULL, cursorOptions, params);
INSTR_TIME_SET_CURRENT(planduration);
INSTR_TIME_SUBTRACT(planduration, planstart);
#if PG_VERSION_NUM >= PG_VERSION_13
/* calc differences of buffer counters. */
if (es->buffers)
{
memset(&bufusage, 0, sizeof(BufferUsage));
BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
}
#endif
/* run it (if needed) and produce output */
ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
&planduration);
ExplainOnePlanCompat(plan, into, es, queryString, params, queryEnv,
&planduration, (es->buffers ? &bufusage : NULL));
}
@ -1453,18 +1493,33 @@ ExplainOneQuery(Query *query, int cursorOptions,
{
instr_time planstart,
planduration;
#if PG_VERSION_NUM >= PG_VERSION_13
BufferUsage bufusage_start,
bufusage;
if (es->buffers)
bufusage_start = pgBufferUsage;
#endif
INSTR_TIME_SET_CURRENT(planstart);
/* plan the query */
PlannedStmt *plan = pg_plan_query(query, cursorOptions, params);
PlannedStmt *plan = pg_plan_query_compat(query, NULL, cursorOptions, params);
INSTR_TIME_SET_CURRENT(planduration);
INSTR_TIME_SUBTRACT(planduration, planstart);
#if PG_VERSION_NUM >= PG_VERSION_13
/* calc differences of buffer counters. */
if (es->buffers)
{
memset(&bufusage, 0, sizeof(BufferUsage));
BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
}
#endif
/* run it (if needed) and produce output */
ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
&planduration);
ExplainOnePlanCompat(plan, into, es, queryString, params, queryEnv,
&planduration, (es->buffers ? &bufusage : NULL));
}
}

View File

@ -1365,7 +1365,7 @@ PartitionColumn(Oid relationId, uint32 rangeTableId)
partitionColumn = partitionKey;
partitionColumn->varno = rangeTableId;
partitionColumn->varnoold = rangeTableId;
partitionColumn->varnosyn = rangeTableId;
return partitionColumn;
}

View File

@ -1438,7 +1438,7 @@ MasterExtendedOpNode(MultiExtendedOp *originalOpNode,
*/
Var *column = makeVarFromTargetEntry(masterTableId, originalTargetEntry);
column->varattno = walkerContext.columnId;
column->varoattno = walkerContext.columnId;
column->varattnosyn = walkerContext.columnId;
walkerContext.columnId++;
if (column->vartype == RECORDOID || column->vartype == RECORDARRAYOID)
@ -1673,9 +1673,9 @@ MasterAggregateExpression(Aggref *originalAggregate,
}
columnToUpdate->varno = masterTableId;
columnToUpdate->varnoold = masterTableId;
columnToUpdate->varnosyn = masterTableId;
columnToUpdate->varattno = startColumnCount + columnIndex;
columnToUpdate->varoattno = startColumnCount + columnIndex;
columnToUpdate->varattnosyn = startColumnCount + columnIndex;
}
/* we added that many columns */
@ -3564,7 +3564,7 @@ AggregateFunctionOid(const char *functionName, Oid inputType)
ScanKeyData scanKey[1];
int scanKeyCount = 1;
Relation procRelation = heap_open(ProcedureRelationId, AccessShareLock);
Relation procRelation = table_open(ProcedureRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_proc_proname,
BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(functionName));
@ -3605,7 +3605,7 @@ AggregateFunctionOid(const char *functionName, Oid inputType)
}
systable_endscan(scanDescriptor);
heap_close(procRelation, AccessShareLock);
table_close(procRelation, AccessShareLock);
return functionOid;
}

View File

@ -130,6 +130,9 @@ static List * QuerySelectClauseList(MultiNode *multiNode);
static List * QueryFromList(List *rangeTableList);
static Node * QueryJoinTree(MultiNode *multiNode, List *dependentJobList,
List **rangeTableList);
static void SetJoinRelatedColumnsCompat(RangeTblEntry *rangeTableEntry,
List *l_colnames, List *r_colnames,
List *leftColVars, List *rightColVars);
static RangeTblEntry * JoinRangeTableEntry(JoinExpr *joinExpr, List *dependentJobList,
List *rangeTableList);
static int ExtractRangeTableId(Node *node);
@ -1075,11 +1078,6 @@ QueryJoinTree(MultiNode *multiNode, List *dependentJobList, List **rangeTableLis
joinExpr->jointype = JOIN_LEFT;
}
RangeTblEntry *rangeTableEntry = JoinRangeTableEntry(joinExpr,
dependentJobList,
*rangeTableList);
*rangeTableList = lappend(*rangeTableList, rangeTableEntry);
/* fix the column attributes in ON (...) clauses */
List *columnList = pull_var_clause_default((Node *) joinNode->joinClauseList);
foreach(columnCell, columnList)
@ -1088,13 +1086,18 @@ QueryJoinTree(MultiNode *multiNode, List *dependentJobList, List **rangeTableLis
UpdateColumnAttributes(column, *rangeTableList, dependentJobList);
/* adjust our column old attributes for partition pruning to work */
column->varnoold = column->varno;
column->varoattno = column->varattno;
column->varnosyn = column->varno;
column->varattnosyn = column->varattno;
}
/* make AND clauses explicit after fixing them */
joinExpr->quals = (Node *) make_ands_explicit(joinNode->joinClauseList);
RangeTblEntry *rangeTableEntry = JoinRangeTableEntry(joinExpr,
dependentJobList,
*rangeTableList);
*rangeTableList = lappend(*rangeTableList, rangeTableEntry);
return (Node *) joinExpr;
}
@ -1228,10 +1231,10 @@ static RangeTblEntry *
JoinRangeTableEntry(JoinExpr *joinExpr, List *dependentJobList, List *rangeTableList)
{
RangeTblEntry *rangeTableEntry = makeNode(RangeTblEntry);
List *joinedColumnNames = NIL;
List *joinedColumnVars = NIL;
List *leftColumnNames = NIL;
List *leftColumnVars = NIL;
List *joinedColumnNames = NIL;
List *joinedColumnVars = NIL;
int leftRangeTableId = ExtractRangeTableId(joinExpr->larg);
RangeTblEntry *leftRTE = rt_fetch(leftRangeTableId, rangeTableList);
List *rightColumnNames = NIL;
@ -1251,19 +1254,46 @@ JoinRangeTableEntry(JoinExpr *joinExpr, List *dependentJobList, List *rangeTable
&leftColumnNames, &leftColumnVars);
ExtractColumns(rightRTE, rightRangeTableId, dependentJobList,
&rightColumnNames, &rightColumnVars);
joinedColumnNames = list_concat(joinedColumnNames, leftColumnNames);
joinedColumnVars = list_concat(joinedColumnVars, leftColumnVars);
joinedColumnNames = list_concat(joinedColumnNames, rightColumnNames);
joinedColumnVars = list_concat(joinedColumnVars, leftColumnVars);
joinedColumnVars = list_concat(joinedColumnVars, rightColumnVars);
rangeTableEntry->eref->colnames = joinedColumnNames;
rangeTableEntry->joinaliasvars = joinedColumnVars;
SetJoinRelatedColumnsCompat(rangeTableEntry,
leftColumnNames, rightColumnNames, leftColumnVars,
rightColumnVars);
return rangeTableEntry;
}
/*
 * SetJoinRelatedColumnsCompat fills in the PG13-specific join column fields
 * of a JOIN RangeTblEntry. On PG13+ it records that no columns are merged
 * (joinmergedcols = 0) and lists every left- and right-side column ordinal
 * in joinleftcols/joinrightcols. On earlier versions it does nothing.
 * The column-name lists are accepted for symmetry with the var lists but
 * are not consulted here.
 */
static void
SetJoinRelatedColumnsCompat(RangeTblEntry *rangeTableEntry,
							List *leftColumnNames, List *rightColumnNames,
							List *leftColumnVars, List *rightColumnVars)
{
#if PG_VERSION_NUM >= PG_VERSION_13
	/* no USING/NATURAL column merging happens here, so nothing is merged */
	rangeTableEntry->joinmergedcols = 0;

	int leftColumnCount = list_length(leftColumnVars);
	int rightColumnCount = list_length(rightColumnVars);

	for (int columnNo = 1; columnNo <= leftColumnCount; columnNo++)
	{
		rangeTableEntry->joinleftcols =
			lappend_int(rangeTableEntry->joinleftcols, columnNo);
	}

	for (int columnNo = 1; columnNo <= rightColumnCount; columnNo++)
	{
		rangeTableEntry->joinrightcols =
			lappend_int(rangeTableEntry->joinrightcols, columnNo);
	}
#endif
}
/*
* ExtractRangeTableId gets the range table id from a node that could
* either be a JoinExpr or RangeTblRef.
@ -1531,8 +1561,8 @@ UpdateAllColumnAttributes(Node *columnContainer, List *rangeTableList,
static void
UpdateColumnAttributes(Var *column, List *rangeTableList, List *dependentJobList)
{
Index originalTableId = column->varnoold;
AttrNumber originalColumnId = column->varoattno;
Index originalTableId = column->varnosyn;
AttrNumber originalColumnId = column->varattnosyn;
/* find the new table identifier */
Index newTableId = NewTableId(originalTableId, rangeTableList);
@ -1616,8 +1646,8 @@ NewColumnId(Index originalTableId, AttrNumber originalColumnId,
* Check against the *old* values for this column, as the new values
* would have been updated already.
*/
if (column->varnoold == originalTableId &&
column->varoattno == originalColumnId)
if (column->varnosyn == originalTableId &&
column->varattnosyn == originalColumnId)
{
newColumnId = columnIndex;
break;
@ -2947,8 +2977,8 @@ AnchorRangeTableIdList(List *rangeTableList, List *baseRangeTableIdList)
/*
* AdjustColumnOldAttributes adjust the old tableId (varnoold) and old columnId
* (varoattno), and sets them equal to the new values. We need this adjustment
* AdjustColumnOldAttributes adjusts the old tableId (varnosyn) and old columnId
* (varattnosyn), and sets them equal to the new values. We need this adjustment
* for partition pruning where we compare these columns with partition columns
* loaded from system catalogs. Since columns loaded from system catalogs always
* have the same old and new values, we also need to adjust column values here.
@ -2962,8 +2992,8 @@ AdjustColumnOldAttributes(List *expressionList)
foreach(columnCell, columnList)
{
Var *column = (Var *) lfirst(columnCell);
column->varnoold = column->varno;
column->varoattno = column->varattno;
column->varnosyn = column->varno;
column->varattnosyn = column->varattno;
}
}
@ -5141,7 +5171,7 @@ GreedyAssignTask(WorkerNode *workerNode, List *taskList, List *activeShardPlacem
rotatePlacementListBy = replicaIndex;
/* overwrite task list to signal that this task is assigned */
taskCell->data.ptr_value = NULL;
SetListCellPtr(taskCell, NULL);
break;
}
}

View File

@ -2976,8 +2976,7 @@ NormalizeMultiRowInsertTargetList(Query *query)
expandedValuesList = lappend(expandedValuesList, targetExpr);
}
valuesListCell->data.ptr_value = (void *) expandedValuesList;
SetListCellPtr(valuesListCell, (void *) expandedValuesList);
}
/* reset coltypes, coltypmods, colcollations and rebuild them below */

View File

@ -1174,7 +1174,7 @@ CreateDistributedSubPlan(uint32 subPlanId, Query *subPlanQuery)
}
DistributedSubPlan *subPlan = CitusMakeNode(DistributedSubPlan);
subPlan->plan = planner(subPlanQuery, cursorOptions, NULL);
subPlan->plan = planner_compat(subPlanQuery, cursorOptions, NULL);
subPlan->subPlanId = subPlanId;
return subPlan;
@ -1681,8 +1681,8 @@ BuildReadIntermediateResultsQuery(List *targetEntryList, List *columnAliasList,
functionColumnVar->vartypmod = columnTypMod;
functionColumnVar->varcollid = columnCollation;
functionColumnVar->varlevelsup = 0;
functionColumnVar->varnoold = 1;
functionColumnVar->varoattno = columnNumber;
functionColumnVar->varnosyn = 1;
functionColumnVar->varattnosyn = columnNumber;
functionColumnVar->location = -1;
TargetEntry *newTargetEntry = makeNode(TargetEntry);

View File

@ -73,8 +73,8 @@ typedef struct AttributeEquivalenceClassMember
static bool ContextContainsLocalRelation(RelationRestrictionContext *restrictionContext);
static Var * FindTranslatedVar(List *appendRelList, Oid relationOid,
Index relationRteIndex, Index *partitionKeyIndex);
static Var * FindUnionAllVar(PlannerInfo *root, List *appendRelList, Oid relationOid,
Index relationRteIndex, Index *partitionKeyIndex);
static bool ContainsMultipleDistributedRelations(PlannerRestrictionContext *
plannerRestrictionContext);
static List * GenerateAttributeEquivalencesForRelationRestrictions(
@ -149,6 +149,7 @@ static JoinRestrictionContext * FilterJoinRestrictionContext(
static bool RangeTableArrayContainsAnyRTEIdentities(RangeTblEntry **rangeTableEntries, int
rangeTableArrayLength, Relids
queryRteIdentities);
static int RangeTableOffsetCompat(PlannerInfo *root, AppendRelInfo *appendRelInfo);
static Relids QueryRteIdentities(Query *queryTree);
static bool JoinRestrictionListExistsInContext(JoinRestriction *joinRestrictionInput,
JoinRestrictionContext *
@ -275,10 +276,10 @@ SafeToPushdownUnionSubquery(PlannerRestrictionContext *plannerRestrictionContext
*/
if (appendRelList != NULL)
{
varToBeAdded = FindTranslatedVar(appendRelList,
relationRestriction->relationId,
relationRestriction->index,
&partitionKeyIndex);
varToBeAdded = FindUnionAllVar(relationPlannerRoot, appendRelList,
relationRestriction->relationId,
relationRestriction->index,
&partitionKeyIndex);
/* union does not have partition key in the target list */
if (partitionKeyIndex == 0)
@ -370,22 +371,16 @@ SafeToPushdownUnionSubquery(PlannerRestrictionContext *plannerRestrictionContext
/*
* FindTranslatedVar iterates on the appendRelList and tries to find a translated
* child var identified by the relation id and the relation rte index.
*
* Note that postgres translates UNION ALL target list elements into translated_vars
* list on the corresponding AppendRelInfo struct. For details, see the related
* structs.
*
* The function returns NULL if it cannot find a translated var.
* FindUnionAllVar finds the variable used in union all for the side that has
* relationRteIndex as its index and the same varattno as the partition key of
* the given relation with relationOid.
*/
static Var *
FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex,
Index *partitionKeyIndex)
FindUnionAllVar(PlannerInfo *root, List *appendRelList, Oid relationOid,
Index relationRteIndex, Index *partitionKeyIndex)
{
ListCell *appendRelCell = NULL;
AppendRelInfo *targetAppendRelInfo = NULL;
ListCell *translatedVarCell = NULL;
AttrNumber childAttrNumber = 0;
*partitionKeyIndex = 0;
@ -395,25 +390,41 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex,
{
AppendRelInfo *appendRelInfo = (AppendRelInfo *) lfirst(appendRelCell);
int rtoffset = RangeTableOffsetCompat(root, appendRelInfo);
/*
* We're only interested in the child rel that is equal to the
* relation we're investigating.
*/
if (appendRelInfo->child_relid == relationRteIndex)
if (appendRelInfo->child_relid - rtoffset == relationRteIndex)
{
targetAppendRelInfo = appendRelInfo;
break;
}
}
/* we couldn't find the necessary append rel info */
if (targetAppendRelInfo == NULL)
if (!targetAppendRelInfo)
{
return NULL;
}
Var *relationPartitionKey = ForceDistPartitionKey(relationOid);
#if PG_VERSION_NUM >= PG_VERSION_13
for (; childAttrNumber < targetAppendRelInfo->num_child_cols; childAttrNumber++)
{
int curAttNo = targetAppendRelInfo->parent_colnos[childAttrNumber];
if (curAttNo == relationPartitionKey->varattno)
{
*partitionKeyIndex = (childAttrNumber + 1);
int rtoffset = RangeTableOffsetCompat(root, targetAppendRelInfo);
relationPartitionKey->varno = targetAppendRelInfo->child_relid - rtoffset;
return relationPartitionKey;
}
}
#else
ListCell *translatedVarCell;
List *translaterVars = targetAppendRelInfo->translated_vars;
foreach(translatedVarCell, translaterVars)
{
@ -435,7 +446,7 @@ FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex,
return targetVar;
}
}
#endif
return NULL;
}
@ -1346,9 +1357,10 @@ AddUnionAllSetOperationsToAttributeEquivalenceClass(AttributeEquivalenceClass **
{
continue;
}
int rtoffset = RangeTableOffsetCompat(root, appendRelInfo);
/* set the varno accordingly for this specific child */
varToBeAdded->varno = appendRelInfo->child_relid;
varToBeAdded->varno = appendRelInfo->child_relid - rtoffset;
AddToAttributeEquivalenceClass(attributeEquivalenceClass, root,
varToBeAdded);
@ -1356,6 +1368,31 @@ AddUnionAllSetOperationsToAttributeEquivalenceClass(AttributeEquivalenceClass **
}
/*
 * RangeTableOffsetCompat returns the range table offset (in glob->finalrtable)
 * for the given appendRelInfo. For PG < 13 this is a no-op and returns 0.
 *
 * On PG13+ it locates the first entry in root->simple_rte_array whose inh
 * flag is set and subtracts that entry's 0-based position (plus one) from
 * appendRelInfo->parent_relid.
 * NOTE(review): this assumes every RTE before the first inh-marked entry must
 * be skipped when mapping parent_relid back to the original range table --
 * confirm against the PG13 planner's inheritance/rtable expansion.
 */
static int
RangeTableOffsetCompat(PlannerInfo *root, AppendRelInfo *appendRelInfo)
{
#if PG_VERSION_NUM >= PG_VERSION_13
	/* scan forward to the first RTE that was expanded for inheritance */
	int i = 1;
	for (; i < root->simple_rel_array_size; i++)
	{
		RangeTblEntry *rte = root->simple_rte_array[i];
		if (rte->inh)
		{
			break;
		}
	}
	/* 0-based position of the first inh entry (array is 1-based) */
	int indexInRtable = (i - 1);
	return appendRelInfo->parent_relid - 1 - (indexInRtable);
#else
	/* pre-PG13 range table indexes need no translation */
	return 0;
#endif
}
/*
* AddUnionSetOperationsToAttributeEquivalenceClass recursively iterates on all the
* setOperations and adds each corresponding target entry to the given equivalence

View File

@ -14,6 +14,7 @@
#include "catalog/pg_type.h"
#include "distributed/metadata_cache.h"
#include "distributed/tdigest_extension.h"
#include "distributed/version_compat.h"
#include "parser/parse_func.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
@ -32,7 +33,7 @@ TDigestExtensionSchema()
Form_pg_extension extensionForm = NULL;
Oid tdigestExtensionSchema = InvalidOid;
Relation relation = heap_open(ExtensionRelationId, AccessShareLock);
Relation relation = table_open(ExtensionRelationId, AccessShareLock);
ScanKeyInit(&entry[0],
Anum_pg_extension_extname,
@ -57,7 +58,7 @@ TDigestExtensionSchema()
systable_endscan(scandesc);
heap_close(relation, AccessShareLock);
table_close(relation, AccessShareLock);
return tdigestExtensionSchema;
}

View File

@ -27,6 +27,7 @@
#include "distributed/remote_commands.h"
#include "distributed/tuplestore.h"
#include "distributed/listutils.h"
#include "distributed/version_compat.h"
#include "tcop/tcopprot.h"
PG_FUNCTION_INFO_V1(partition_task_list_results);
@ -49,9 +50,10 @@ partition_task_list_results(PG_FUNCTION_ARGS)
bool binaryFormat = PG_GETARG_BOOL(3);
Query *parsedQuery = ParseQueryString(queryString, NULL, 0);
PlannedStmt *queryPlan = pg_plan_query(parsedQuery,
CURSOR_OPT_PARALLEL_OK,
NULL);
PlannedStmt *queryPlan = pg_plan_query_compat(parsedQuery,
queryString,
CURSOR_OPT_PARALLEL_OK,
NULL);
if (!IsCitusCustomScan(queryPlan->planTree))
{
ereport(ERROR, (errmsg("query must be distributed and shouldn't require "
@ -122,9 +124,10 @@ redistribute_task_list_results(PG_FUNCTION_ARGS)
bool binaryFormat = PG_GETARG_BOOL(3);
Query *parsedQuery = ParseQueryString(queryString, NULL, 0);
PlannedStmt *queryPlan = pg_plan_query(parsedQuery,
CURSOR_OPT_PARALLEL_OK,
NULL);
PlannedStmt *queryPlan = pg_plan_query_compat(parsedQuery,
queryString,
CURSOR_OPT_PARALLEL_OK,
NULL);
if (!IsCitusCustomScan(queryPlan->planTree))
{
ereport(ERROR, (errmsg("query must be distributed and shouldn't require "

View File

@ -14,8 +14,9 @@
#include "fmgr.h"
#include "funcapi.h"
#include "distributed/listutils.h"
#include "distributed/metadata_cache.h"
#include "distributed/version_compat.h"
/* these functions are only exported in the regression tests */
PG_FUNCTION_INFO_V1(get_referencing_relation_id_list);
@ -47,10 +48,12 @@ get_referencing_relation_id_list(PG_FUNCTION_ARGS)
MemoryContextSwitchTo(functionContext->multi_call_memory_ctx);
List *refList = list_copy(
cacheEntry->referencingRelationsViaForeignKey);
MemoryContextSwitchTo(oldContext);
ListCellAndListWrapper *wrapper = palloc0(sizeof(ListCellAndListWrapper));
foreignRelationCell = list_head(refList);
functionContext->user_fctx = foreignRelationCell;
wrapper->list = refList;
wrapper->listCell = foreignRelationCell;
functionContext->user_fctx = wrapper;
MemoryContextSwitchTo(oldContext);
}
/*
@ -61,12 +64,13 @@ get_referencing_relation_id_list(PG_FUNCTION_ARGS)
*/
functionContext = SRF_PERCALL_SETUP();
foreignRelationCell = (ListCell *) functionContext->user_fctx;
if (foreignRelationCell != NULL)
ListCellAndListWrapper *wrapper =
(ListCellAndListWrapper *) functionContext->user_fctx;
if (wrapper->listCell != NULL)
{
Oid refId = lfirst_oid(foreignRelationCell);
Oid refId = lfirst_oid(wrapper->listCell);
functionContext->user_fctx = lnext(foreignRelationCell);
wrapper->listCell = lnext_compat(wrapper->list, wrapper->listCell);
SRF_RETURN_NEXT(functionContext, PointerGetDatum(refId));
}
@ -102,10 +106,12 @@ get_referenced_relation_id_list(PG_FUNCTION_ARGS)
MemoryContext oldContext =
MemoryContextSwitchTo(functionContext->multi_call_memory_ctx);
List *refList = list_copy(cacheEntry->referencedRelationsViaForeignKey);
MemoryContextSwitchTo(oldContext);
foreignRelationCell = list_head(refList);
functionContext->user_fctx = foreignRelationCell;
ListCellAndListWrapper *wrapper = palloc0(sizeof(ListCellAndListWrapper));
wrapper->list = refList;
wrapper->listCell = foreignRelationCell;
functionContext->user_fctx = wrapper;
MemoryContextSwitchTo(oldContext);
}
/*
@ -116,12 +122,14 @@ get_referenced_relation_id_list(PG_FUNCTION_ARGS)
*/
functionContext = SRF_PERCALL_SETUP();
foreignRelationCell = (ListCell *) functionContext->user_fctx;
if (foreignRelationCell != NULL)
{
Oid refId = lfirst_oid(foreignRelationCell);
ListCellAndListWrapper *wrapper =
(ListCellAndListWrapper *) functionContext->user_fctx;
functionContext->user_fctx = lnext(foreignRelationCell);
if (wrapper->listCell != NULL)
{
Oid refId = lfirst_oid(wrapper->listCell);
wrapper->listCell = lnext_compat(wrapper->list, wrapper->listCell);
SRF_RETURN_NEXT(functionContext, PointerGetDatum(refId));
}

View File

@ -15,6 +15,8 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "miscadmin.h"
#include "access/xact.h"
@ -27,6 +29,9 @@
#include "distributed/metadata_cache.h"
#include "distributed/relation_access_tracking.h"
#include "utils/hsearch.h"
#if PG_VERSION_NUM >= PG_VERSION_13
#include "common/hashfn.h"
#endif
#include "utils/lsyscache.h"

View File

@ -95,7 +95,8 @@ LogTransactionRecord(int32 groupId, char *transactionName)
values[Anum_pg_dist_transaction_gid - 1] = CStringGetTextDatum(transactionName);
/* open transaction relation and insert new tuple */
Relation pgDistTransaction = heap_open(DistTransactionRelationId(), RowExclusiveLock);
Relation pgDistTransaction = table_open(DistTransactionRelationId(),
RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistTransaction);
HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
@ -105,7 +106,7 @@ LogTransactionRecord(int32 groupId, char *transactionName)
CommandCounterIncrement();
/* close relation and invalidate previous cache entry */
heap_close(pgDistTransaction, NoLock);
table_close(pgDistTransaction, NoLock);
}
@ -171,8 +172,8 @@ RecoverWorkerTransactions(WorkerNode *workerNode)
MemoryContext oldContext = MemoryContextSwitchTo(localContext);
/* take table lock first to avoid running concurrently */
Relation pgDistTransaction = heap_open(DistTransactionRelationId(),
ShareUpdateExclusiveLock);
Relation pgDistTransaction = table_open(DistTransactionRelationId(),
ShareUpdateExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistTransaction);
/*
@ -344,7 +345,7 @@ RecoverWorkerTransactions(WorkerNode *workerNode)
}
systable_endscan(scanDescriptor);
heap_close(pgDistTransaction, NoLock);
table_close(pgDistTransaction, NoLock);
if (!recoveryFailed)
{

View File

@ -29,6 +29,7 @@
#include "distributed/pg_dist_colocation.h"
#include "distributed/resource_lock.h"
#include "distributed/shardinterval_utils.h"
#include "distributed/version_compat.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_transaction.h"
#include "storage/lmgr.h"
@ -154,7 +155,7 @@ BreakColocation(Oid sourceRelationId)
* can be sure that there will be no modifications on the colocation table
* until this transaction is committed.
*/
Relation pgDistColocation = heap_open(DistColocationRelationId(), ExclusiveLock);
Relation pgDistColocation = table_open(DistColocationRelationId(), ExclusiveLock);
uint32 newColocationId = GetNextColocationId();
UpdateRelationColocationGroup(sourceRelationId, newColocationId);
@ -162,7 +163,7 @@ BreakColocation(Oid sourceRelationId)
/* if there is not any remaining table in the colocation group, delete it */
DeleteColocationGroupIfNoTablesBelong(sourceRelationId);
heap_close(pgDistColocation, NoLock);
table_close(pgDistColocation, NoLock);
}
@ -248,7 +249,7 @@ MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId)
* can be sure that there will be no modifications on the colocation table
* until this transaction is committed.
*/
Relation pgDistColocation = heap_open(DistColocationRelationId(), ExclusiveLock);
Relation pgDistColocation = table_open(DistColocationRelationId(), ExclusiveLock);
/* check if shard placements are colocated */
ErrorIfShardPlacementsNotColocated(sourceRelationId, targetRelationId);
@ -271,7 +272,7 @@ MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId)
/* if there is not any remaining table in the colocation group, delete it */
DeleteColocationGroupIfNoTablesBelong(targetColocationId);
heap_close(pgDistColocation, NoLock);
table_close(pgDistColocation, NoLock);
}
@ -514,7 +515,7 @@ ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType,
ScanKeyData scanKey[4];
bool indexOK = true;
Relation pgDistColocation = heap_open(DistColocationRelationId(), AccessShareLock);
Relation pgDistColocation = table_open(DistColocationRelationId(), AccessShareLock);
/* set scan arguments */
ScanKeyInit(&scanKey[0], Anum_pg_dist_colocation_distributioncolumntype,
@ -541,7 +542,7 @@ ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType,
}
systable_endscan(scanDescriptor);
heap_close(pgDistColocation, AccessShareLock);
table_close(pgDistColocation, AccessShareLock);
return colocationId;
}
@ -574,7 +575,7 @@ CreateColocationGroup(int shardCount, int replicationFactor, Oid distributionCol
ObjectIdGetDatum(distributionColumnCollation);
/* open colocation relation and insert the new tuple */
Relation pgDistColocation = heap_open(DistColocationRelationId(), RowExclusiveLock);
Relation pgDistColocation = table_open(DistColocationRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistColocation);
HeapTuple heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls);
@ -583,7 +584,7 @@ CreateColocationGroup(int shardCount, int replicationFactor, Oid distributionCol
/* increment the counter so that next command can see the row */
CommandCounterIncrement();
heap_close(pgDistColocation, RowExclusiveLock);
table_close(pgDistColocation, RowExclusiveLock);
return colocationId;
}
@ -716,7 +717,7 @@ UpdateRelationColocationGroup(Oid distributedRelationId, uint32 colocationId)
bool isNull[Natts_pg_dist_partition];
bool replace[Natts_pg_dist_partition];
Relation pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock);
Relation pgDistPartition = table_open(DistPartitionRelationId(), RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition);
ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid,
@ -753,7 +754,7 @@ UpdateRelationColocationGroup(Oid distributedRelationId, uint32 colocationId)
CommandCounterIncrement();
systable_endscan(scanDescriptor);
heap_close(pgDistPartition, NoLock);
table_close(pgDistPartition, NoLock);
bool shouldSyncMetadata = ShouldSyncTableMetadata(distributedRelationId);
if (shouldSyncMetadata)
@ -882,7 +883,7 @@ ColocationGroupTableList(Oid colocationId)
ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_colocationid,
BTEqualStrategyNumber, F_INT4EQ, ObjectIdGetDatum(colocationId));
Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock);
Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition);
SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition,
DistPartitionColocationidIndexId(),
@ -901,7 +902,7 @@ ColocationGroupTableList(Oid colocationId)
}
systable_endscan(scanDescriptor);
heap_close(pgDistPartition, AccessShareLock);
table_close(pgDistPartition, AccessShareLock);
return colocatedTableList;
}
@ -997,7 +998,7 @@ ColocatedTableId(Oid colocationId)
ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_colocationid,
BTEqualStrategyNumber, F_INT4EQ, ObjectIdGetDatum(colocationId));
Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock);
Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition);
SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition,
DistPartitionColocationidIndexId(),
@ -1034,7 +1035,7 @@ ColocatedTableId(Oid colocationId)
}
systable_endscan(scanDescriptor);
heap_close(pgDistPartition, AccessShareLock);
table_close(pgDistPartition, AccessShareLock);
return colocatedTableId;
}
@ -1085,7 +1086,7 @@ DeleteColocationGroup(uint32 colocationId)
ScanKeyData scanKey[1];
bool indexOK = false;
Relation pgDistColocation = heap_open(DistColocationRelationId(), RowExclusiveLock);
Relation pgDistColocation = table_open(DistColocationRelationId(), RowExclusiveLock);
ScanKeyInit(&scanKey[0], Anum_pg_dist_colocation_colocationid,
BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(colocationId));
@ -1108,9 +1109,9 @@ DeleteColocationGroup(uint32 colocationId)
CitusInvalidateRelcacheByRelid(DistColocationRelationId());
CommandCounterIncrement();
heap_close(replicaIndex, AccessShareLock);
table_close(replicaIndex, AccessShareLock);
}
systable_endscan(scanDescriptor);
heap_close(pgDistColocation, RowExclusiveLock);
table_close(pgDistColocation, RowExclusiveLock);
}

View File

@ -31,6 +31,9 @@
#include "storage/lockdefs.h"
#include "utils/fmgroids.h"
#include "utils/hsearch.h"
#if PG_VERSION_NUM >= PG_VERSION_13
#include "common/hashfn.h"
#endif
#include "utils/memutils.h"
@ -296,7 +299,7 @@ PopulateAdjacencyLists(void)
Oid prevReferencedOid = InvalidOid;
List *frelEdgeList = NIL;
Relation pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_constraint_contype, BTEqualStrategyNumber, F_CHAREQ,
CharGetDatum(CONSTRAINT_FOREIGN));
@ -345,7 +348,7 @@ PopulateAdjacencyLists(void)
}
systable_endscan(scanDescriptor);
heap_close(pgConstraint, AccessShareLock);
table_close(pgConstraint, AccessShareLock);
}

View File

@ -16,6 +16,8 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include <time.h>
#include "miscadmin.h"
@ -49,6 +51,9 @@
#include "storage/lmgr.h"
#include "storage/lwlock.h"
#include "tcop/tcopprot.h"
#if PG_VERSION_NUM >= PG_VERSION_13
#include "common/hashfn.h"
#endif
#include "utils/memutils.h"
#include "utils/lsyscache.h"

View File

@ -21,6 +21,7 @@
#include "distributed/coordinator_protocol.h"
#include "distributed/multi_partitioning_utils.h"
#include "distributed/shardinterval_utils.h"
#include "distributed/version_compat.h"
#include "lib/stringinfo.h"
#include "nodes/pg_list.h"
#include "pgstat.h"
@ -58,7 +59,7 @@ PartitionedTable(Oid relationId)
}
/* keep the lock */
heap_close(rel, NoLock);
table_close(rel, NoLock);
return partitionedTable;
}
@ -87,7 +88,7 @@ PartitionedTableNoLock(Oid relationId)
}
/* keep the lock */
heap_close(rel, NoLock);
table_close(rel, NoLock);
return partitionedTable;
}
@ -110,7 +111,7 @@ PartitionTable(Oid relationId)
bool partitionTable = rel->rd_rel->relispartition;
/* keep the lock */
heap_close(rel, NoLock);
table_close(rel, NoLock);
return partitionTable;
}
@ -135,7 +136,7 @@ PartitionTableNoLock(Oid relationId)
bool partitionTable = rel->rd_rel->relispartition;
/* keep the lock */
heap_close(rel, NoLock);
table_close(rel, NoLock);
return partitionTable;
}
@ -185,7 +186,7 @@ IsChildTable(Oid relationId)
HeapTuple inheritsTuple = NULL;
bool tableInherits = false;
Relation pgInherits = heap_open(InheritsRelationId, AccessShareLock);
Relation pgInherits = table_open(InheritsRelationId, AccessShareLock);
ScanKeyInit(&key[0], Anum_pg_inherits_inhrelid,
BTEqualStrategyNumber, F_OIDEQ,
@ -207,7 +208,7 @@ IsChildTable(Oid relationId)
}
systable_endscan(scan);
heap_close(pgInherits, AccessShareLock);
table_close(pgInherits, AccessShareLock);
if (tableInherits && PartitionTable(relationId))
{
@ -229,7 +230,7 @@ IsParentTable(Oid relationId)
ScanKeyData key[1];
bool tableInherited = false;
Relation pgInherits = heap_open(InheritsRelationId, AccessShareLock);
Relation pgInherits = table_open(InheritsRelationId, AccessShareLock);
ScanKeyInit(&key[0], Anum_pg_inherits_inhparent,
BTEqualStrategyNumber, F_OIDEQ,
@ -243,7 +244,7 @@ IsParentTable(Oid relationId)
tableInherited = true;
}
systable_endscan(scan);
heap_close(pgInherits, AccessShareLock);
table_close(pgInherits, AccessShareLock);
if (tableInherited && PartitionedTable(relationId))
{
@ -277,7 +278,7 @@ PartitionParentOid(Oid partitionOid)
List *
PartitionList(Oid parentRelationId)
{
Relation rel = heap_open(parentRelationId, AccessShareLock);
Relation rel = table_open(parentRelationId, AccessShareLock);
List *partitionList = NIL;
@ -298,7 +299,7 @@ PartitionList(Oid parentRelationId)
}
/* keep the lock */
heap_close(rel, NoLock);
table_close(rel, NoLock);
return partitionList;
}

View File

@ -197,14 +197,14 @@ DistributedTablesSize(List *distTableOids)
if (PartitionMethod(relationId) == DISTRIBUTE_BY_HASH &&
!SingleReplicatedTable(relationId))
{
heap_close(relation, AccessShareLock);
table_close(relation, AccessShareLock);
continue;
}
Datum tableSizeDatum = DirectFunctionCall1(citus_table_size,
ObjectIdGetDatum(relationId));
totalSize += DatumGetInt64(tableSizeDatum);
heap_close(relation, AccessShareLock);
table_close(relation, AccessShareLock);
}
return totalSize;

View File

@ -6,6 +6,12 @@
#include <unistd.h>
#include <math.h>
#include "distributed/pg_version_constants.h"
#if PG_VERSION_NUM >= PG_VERSION_13
#include "common/hashfn.h"
#endif
#include "commands/dbcommands.h"
#include "distributed/citus_custom_scan.h"
#include "distributed/citus_nodes.h"

View File

@ -278,7 +278,7 @@ worker_cleanup_job_schema_cache(PG_FUNCTION_ARGS)
CheckCitusVersion(ERROR);
pgNamespace = heap_open(NamespaceRelationId, AccessExclusiveLock);
pgNamespace = table_open(NamespaceRelationId, AccessExclusiveLock);
#if PG_VERSION_NUM >= PG_VERSION_12
scanDescriptor = table_beginscan_catalog(pgNamespace, scanKeyCount, scanKey);
#else
@ -304,7 +304,7 @@ worker_cleanup_job_schema_cache(PG_FUNCTION_ARGS)
}
heap_endscan(scanDescriptor);
heap_close(pgNamespace, AccessExclusiveLock);
table_close(pgNamespace, AccessExclusiveLock);
PG_RETURN_VOID();
}

View File

@ -15,6 +15,7 @@
#include "distributed/metadata_utility.h"
#include "distributed/metadata_cache.h"
#include "distributed/version_compat.h"
#include "nodes/execnodes.h"
#include "nodes/parsenodes.h"
#include "parser/parse_coerce.h"
@ -155,7 +156,8 @@ extern void AppendCopyBinaryHeaders(CopyOutState headerOutputState);
extern void AppendCopyBinaryFooters(CopyOutState footerOutputState);
extern void EndRemoteCopy(int64 shardId, List *connectionList);
extern List * CreateRangeTable(Relation rel, AclMode requiredAccess);
extern Node * ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag,
extern Node * ProcessCopyStmt(CopyStmt *copyStatement,
QueryCompletionCompat *completionTag,
const char *queryString);
extern void CheckCopyPermissions(CopyStmt *copyStatement);
extern bool IsCopyResultStmt(CopyStmt *copyStatement);

View File

@ -10,6 +10,8 @@
#ifndef MULTI_UTILITY_H
#define MULTI_UTILITY_H
#include "distributed/pg_version_constants.h"
#include "postgres.h"
#include "utils/relcache.h"
@ -51,10 +53,13 @@ typedef struct DDLJob
extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
ProcessUtilityContext context, ParamListInfo params,
struct QueryEnvironment *queryEnv, DestReceiver *dest,
char *completionTag);
QueryCompletionCompat *completionTag
);
extern void CitusProcessUtility(Node *node, const char *queryString,
ProcessUtilityContext context, ParamListInfo params,
DestReceiver *dest, char *completionTag);
DestReceiver *dest,
QueryCompletionCompat *completionTag
);
extern void MarkInvalidateForeignKeyGraph(void);
extern void InvalidateForeignKeyGraphForDDL(void);
extern List * DDLTaskList(Oid relationId, const char *commandString);

View File

@ -18,6 +18,7 @@
#include "nodes/nodes.h"
#include "nodes/parsenodes.h"
#include "catalog/objectaddress.h"
#include "lib/stringinfo.h"
/* forward declarations for format_collate.c */
/* Control flags for FormatCollateExtended, compatible with format_type_extended */

View File

@ -183,8 +183,18 @@ typedef struct CitusCustomScanPath
} CitusCustomScanPath;
extern PlannedStmt * distributed_planner(Query *parse, int cursorOptions,
#if PG_VERSION_NUM >= PG_VERSION_13
extern PlannedStmt * distributed_planner(Query *parse,
const char *query_string,
int cursorOptions,
ParamListInfo boundParams);
#else
extern PlannedStmt * distributed_planner(Query *parse,
int cursorOptions,
ParamListInfo boundParams);
#endif
extern List * ExtractRangeTableEntryList(Query *query);
extern List * ExtractReferenceTableRTEList(List *rteList);
extern bool NeedsDistributedPlanning(Query *query);

View File

@ -18,8 +18,21 @@
#include "nodes/pg_list.h"
#include "utils/array.h"
#include "utils/hsearch.h"
#include "distributed/version_compat.h"
/*
 * ListCellAndListWrapper stores a list and list cell.
 * This struct is used for functionContext. When iterating a list
 * in separate function calls, we need both the list and the current cell.
 * Therefore this wrapper stores both of them.
 */
typedef struct ListCellAndListWrapper
{
/* the List being iterated; needed because PG13's lnext() takes the List too */
List *list;
/* current iteration position; NULL when the iteration is exhausted */
ListCell *listCell;
} ListCellAndListWrapper;
/*
* foreach_ptr -
* a convenience macro which loops through a pointer list without needing a
@ -39,7 +52,7 @@
for (ListCell *(var ## CellDoNotUse) = list_head(l); \
(var ## CellDoNotUse) != NULL && \
(((var) = lfirst(var ## CellDoNotUse)) || true); \
var ## CellDoNotUse = lnext(var ## CellDoNotUse))
var ## CellDoNotUse = lnext_compat(l, var ## CellDoNotUse))
/*
@ -52,7 +65,7 @@
for (ListCell *(var ## CellDoNotUse) = list_head(l); \
(var ## CellDoNotUse) != NULL && \
(((var) = lfirst_int(var ## CellDoNotUse)) || true); \
var ## CellDoNotUse = lnext(var ## CellDoNotUse))
var ## CellDoNotUse = lnext_compat(l, var ## CellDoNotUse))
/*
@ -65,7 +78,7 @@
for (ListCell *(var ## CellDoNotUse) = list_head(l); \
(var ## CellDoNotUse) != NULL && \
(((var) = lfirst_oid(var ## CellDoNotUse)) || true); \
var ## CellDoNotUse = lnext(var ## CellDoNotUse))
var ## CellDoNotUse = lnext_compat(l, var ## CellDoNotUse))
/* utility functions declaration shared within this module */

View File

@ -14,5 +14,6 @@
/*
 * PostgreSQL major-version thresholds in PG_VERSION_NUM format
 * (MAJOR * 10000), used for version-gated compilation.
 */
#define PG_VERSION_11 110000
#define PG_VERSION_12 120000
#define PG_VERSION_13 130000
/* defined ahead of time so forward-looking gates can already be written */
#define PG_VERSION_14 140000
#endif /* PG_VERSION_CONSTANTS */

View File

@ -24,6 +24,38 @@
#include "optimizer/optimizer.h"
#endif
/* PG13 moved pg_plan_query()'s declaration into tcop/tcopprot.h */
#if (PG_VERSION_NUM >= PG_VERSION_13)
#include "tcop/tcopprot.h"
#endif

/*
 * Compatibility macros papering over API changes between PG13 and earlier
 * releases. Callers use the *_compat / *Compat names with the PG13-style
 * (superset) argument lists; each branch forwards to the signature that the
 * compiled-against server version actually provides.
 */
#if PG_VERSION_NUM >= PG_VERSION_13
/* PG13 list rewrite: lnext() now needs the List as well as the cell */
#define lnext_compat(l, r) lnext(l, r)
/* PG13 dropped list_delete_cell()'s "prev" parameter */
#define list_delete_cell_compat(l, c, p) list_delete_cell(l, c)
/* PG13 added a query_string parameter to pg_plan_query/planner hooks */
#define pg_plan_query_compat(p, q, c, b) pg_plan_query(p, q, c, b)
#define planner_compat(p, c, b) planner(p, NULL, c, b)
#define standard_planner_compat(a, c, d) standard_planner(a, NULL, c, d)
/* PG13 getOwnedSequences() lost its attnum parameter */
#define getOwnedSequencesCompat(a, b) getOwnedSequences(a)
/* PG13 replaced char-string command tags with the CommandTag enum */
#define CMDTAG_SELECT_COMPAT CMDTAG_SELECT
/* PG13 ExplainOnePlan() gained a BufferUsage parameter (h) */
#define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) \
ExplainOnePlan(a, b, c, d, e, f, g, h)
/* PG13 ListCell is a union; ptr_value is accessed directly */
#define SetListCellPtr(a, b) ((a)->ptr_value = (b))
/* PG13 addRangeTableEntry*() return a ParseNamespaceItem wrapping the RTE */
#define RangeTableEntryFromNSItem(a) ((a)->p_rte)
/* PG13 replaced the char *completionTag with struct QueryCompletion */
#define QueryCompletionCompat QueryCompletion
#else /* pre PG13 */
#define lnext_compat(l, r) lnext(r)
#define list_delete_cell_compat(l, c, p) list_delete_cell(l, c, p)
#define pg_plan_query_compat(p, q, c, b) pg_plan_query(p, c, b)
#define planner_compat(p, c, b) planner(p, c, b)
#define standard_planner_compat(a, c, d) standard_planner(a, c, d)
#define CMDTAG_SELECT_COMPAT "SELECT"
#define getOwnedSequencesCompat(a, b) getOwnedSequences(a, b)
#define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) ExplainOnePlan(a, b, c, d, e, f, g)
#define SetListCellPtr(a, b) ((a)->data.ptr_value = (b))
#define RangeTableEntryFromNSItem(a) (a)
#define QueryCompletionCompat char
/* PG13 renamed Var's varoattno/varnoold to varattnosyn/varnosyn */
#define varattnosyn varoattno
#define varnosyn varnoold
#endif
#if PG_VERSION_NUM >= PG_VERSION_12
#define MakeSingleTupleTableSlotCompat MakeSingleTupleTableSlot
@ -87,6 +119,10 @@ FileCompatFromFileStart(File fileDesc)
#else /* pre PG12 */
#define table_open(r, l) heap_open(r, l)
#define table_openrv(r, l) heap_openrv(r, l)
#define table_openrv_extended(r, l, m) heap_openrv_extended(r, l, m)
#define table_close(r, l) heap_close(r, l)
#define QTW_EXAMINE_RTES_BEFORE QTW_EXAMINE_RTES
#define MakeSingleTupleTableSlotCompat(tupleDesc, tts_opts) \
MakeSingleTupleTableSlot(tupleDesc)

View File

@ -50,6 +50,9 @@ s/"(raw_events_second_user_id_value_1_key_|agg_events_user_id_value_1_agg_key_)[
# ignore could not consume warnings
/WARNING: could not consume data from worker node/d
# ignore page split with pg13
/DEBUG: concurrent ROOT page split/d
# ignore WAL warnings
/DEBUG: .+creating and filling new WAL file/d
@ -86,6 +89,21 @@ s/_ref_id_id_fkey_/_ref_id_fkey_/g
s/fk_test_2_col1_col2_fkey/fk_test_2_col1_fkey/g
s/_id_other_column_ref_fkey/_id_fkey/g
# pg13 changes
s/of relation ".*" violates not-null constraint/violates not-null constraint/g
s/varnosyn/varnoold/g
s/varattnosyn/varoattno/g
/DEBUG: index ".*" can safely use deduplication.*$/d
/DEBUG: index ".*" cannot use deduplication.*$/d
/DEBUG: building index ".*" on table ".*" serially.*$/d
s/partition ".*" would be violated by some row/partition would be violated by some row/g
/.*Peak Memory Usage:.*$/d
s/of relation ".*" contains null values/contains null values/g
s/of relation "t1" is violated by some row/is violated by some row/g
# can be removed when we remove PG_VERSION_NUM >= 120000
s/(.*)Output:.*$/\1Output: xxxxxx/g
# intermediate_results
s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g

View File

@ -335,11 +335,11 @@ having sum(s_order_cnt) >
where mod((s_w_id * s_i_id),10000) = s_suppkey
and s_nationkey = n_nationkey
and n_name = 'GERMANY')
order by ordercount desc;
order by s_i_id, ordercount desc;
s_i_id | ordercount
---------------------------------------------------------------------
33 | 1
1 | 1
33 | 1
(2 rows)
insert into stock VALUES
@ -356,7 +356,7 @@ having sum(s_order_cnt) >
where mod((s_w_id * s_i_id),10000) = s_suppkey
and s_nationkey = n_nationkey
and n_name = 'GERMANY')
order by ordercount desc;
order by s_i_id, ordercount desc;
s_i_id | ordercount
---------------------------------------------------------------------
1 | 100001

View File

@ -345,11 +345,11 @@ having sum(s_order_cnt) >
where mod((s_w_id * s_i_id),10000) = s_suppkey
and s_nationkey = n_nationkey
and n_name = 'GERMANY')
order by ordercount desc;
order by s_i_id, ordercount desc;
s_i_id | ordercount
---------------------------------------------------------------------
33 | 1
1 | 1
33 | 1
(2 rows)
insert into stock VALUES
@ -366,7 +366,7 @@ having sum(s_order_cnt) >
where mod((s_w_id * s_i_id),10000) = s_suppkey
and s_nationkey = n_nationkey
and n_name = 'GERMANY')
order by ordercount desc;
order by s_i_id, ordercount desc;
s_i_id | ordercount
---------------------------------------------------------------------
1 | 100001

View File

@ -12,10 +12,10 @@ INSERT INTO test_table SELECT i % 10, 'test' || i, row_to_json(row(i, i*18, 'tes
-- server version because CTE inlining might produce
-- different debug messages in PG 11 vs PG 12
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int;
substring
SELECT substring(:'server_version', '\d+')::int >= 12;
?column?
---------------------------------------------------------------------
12
t
(1 row)
SET client_min_messages TO DEBUG;
@ -725,7 +725,8 @@ WITH cte_1 AS (SELECT * FROM test_table),
cte_2 AS (SELECT * FROM test_table ORDER BY 1 DESC LIMIT 3)
(SELECT *, (SELECT 1) FROM cte_1 EXCEPT SELECT *, 1 FROM test_table)
UNION
(SELECT *, 1 FROM cte_2);
(SELECT *, 1 FROM cte_2)
ORDER BY 1,2;
DEBUG: CTE cte_1 is going to be inlined via distributed planning
DEBUG: CTE cte_2 is going to be inlined via distributed planning
DEBUG: Router planner cannot handle multi-shard select queries
@ -743,13 +744,13 @@ DEBUG: Creating router plan
DEBUG: generating subplan XXX_3 for subquery SELECT key, value, other_value, (SELECT 1) FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_1
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_4 for subquery SELECT key, value, other_value, 1 FROM cte_inline.test_table
DEBUG: Plan XXX query after replacing subqueries and CTEs: (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer) EXCEPT SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer)) UNION SELECT cte_2.key, cte_2.value, cte_2.other_value, 1 FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer) EXCEPT SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer)) UNION SELECT cte_2.key, cte_2.value, cte_2.other_value, 1 FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_2 ORDER BY 1, 2
DEBUG: Creating router plan
key | value | other_value | ?column?
---------------------------------------------------------------------
9 | test19 | {"f1": 19, "f2": 342, "f3": "test19"} | 1
9 | test29 | {"f1": 29, "f2": 522, "f3": "test29"} | 1
9 | test9 | {"f1": 9, "f2": 162, "f3": "test9"} | 1
9 | test19 | {"f1": 19, "f2": 342, "f3": "test19"} | 1
(3 rows)
-- cte_1 is safe to inline, even if because after inlining

View File

@ -12,10 +12,10 @@ INSERT INTO test_table SELECT i % 10, 'test' || i, row_to_json(row(i, i*18, 'tes
-- server version because CTE inlining might produce
-- different debug messages in PG 11 vs PG 12
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int;
substring
SELECT substring(:'server_version', '\d+')::int >= 12;
?column?
---------------------------------------------------------------------
11
f
(1 row)
SET client_min_messages TO DEBUG;
@ -617,7 +617,8 @@ WITH cte_1 AS (SELECT * FROM test_table),
cte_2 AS (SELECT * FROM test_table ORDER BY 1 DESC LIMIT 3)
(SELECT *, (SELECT 1) FROM cte_1 EXCEPT SELECT *, 1 FROM test_table)
UNION
(SELECT *, 1 FROM cte_2);
(SELECT *, 1 FROM cte_2)
ORDER BY 1,2;
DEBUG: CTE cte_1 is going to be inlined via distributed planning
DEBUG: CTE cte_2 is going to be inlined via distributed planning
DEBUG: Router planner cannot handle multi-shard select queries
@ -635,13 +636,13 @@ DEBUG: Creating router plan
DEBUG: generating subplan XXX_3 for subquery SELECT key, value, other_value, (SELECT 1) FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_1
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_4 for subquery SELECT key, value, other_value, 1 FROM cte_inline.test_table
DEBUG: Plan XXX query after replacing subqueries and CTEs: (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer) EXCEPT SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer)) UNION SELECT cte_2.key, cte_2.value, cte_2.other_value, 1 FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer) EXCEPT SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value, intermediate_result."?column?" FROM read_intermediate_result('XXX_4'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb, "?column?" integer)) UNION SELECT cte_2.key, cte_2.value, cte_2.other_value, 1 FROM (SELECT intermediate_result.key, intermediate_result.value, intermediate_result.other_value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, other_value jsonb)) cte_2 ORDER BY 1, 2
DEBUG: Creating router plan
key | value | other_value | ?column?
---------------------------------------------------------------------
9 | test19 | {"f1": 19, "f2": 342, "f3": "test19"} | 1
9 | test29 | {"f1": 29, "f2": 522, "f3": "test29"} | 1
9 | test9 | {"f1": 9, "f2": 162, "f3": "test9"} | 1
9 | test19 | {"f1": 19, "f2": 342, "f3": "test19"} | 1
(3 rows)
-- cte_1 is safe to inline, even if because after inlining

View File

@ -192,7 +192,6 @@ BEGIN;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
BEGIN;
@ -204,7 +203,6 @@ BEGIN;
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
-- case 1.6: SELECT to a reference table is followed by an unrelated DDL
@ -488,7 +486,6 @@ DEBUG: switching to sequential query execution mode
DETAIL: Reference table "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
BEGIN;
@ -497,7 +494,6 @@ DEBUG: switching to sequential query execution mode
DETAIL: Reference table "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
-- case 2.6: UPDATE to a reference table is followed by an unrelated DDL
@ -534,14 +530,12 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference table "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
TRUNCATE on_update_fkey_table;
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
BEGIN;
UPDATE transitive_reference_table SET id = 101 WHERE id = 99;
DEBUG: switching to sequential query execution mode
DETAIL: Reference table "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
TRUNCATE on_update_fkey_table;
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
-- case 3.1: an unrelated DDL to a reference table is followed by a real-time SELECT
BEGIN;
@ -621,38 +615,30 @@ ROLLBACK;
BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
DEBUG: validating foreign key constraint "fkey"
CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1);
DEBUG: building index "fkey_test_index_1" on table "on_update_fkey_table" serially
ROLLBACK;
BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table"
DEBUG: building index "transitive_reference_table_pkey" on table "transitive_reference_table" serially
DEBUG: validating foreign key constraint "fkey"
CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1);
DEBUG: building index "fkey_test_index_1" on table "on_update_fkey_table" serially
ROLLBACK;
-- case 4.6: DDL to reference table followed by a DDL to dist table, both touching fkey columns
BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
DEBUG: validating foreign key constraint "fkey"
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table"
DEBUG: building index "transitive_reference_table_pkey" on table "transitive_reference_table" serially
DEBUG: validating foreign key constraint "fkey"
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table"
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
DEBUG: validating foreign key constraint "fkey"
ROLLBACK;
-- case 3.7: DDL to a reference table is followed by COPY
@ -674,31 +660,25 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference table "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
TRUNCATE on_update_fkey_table;
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
BEGIN;
ALTER TABLE transitive_reference_table ADD COLUMN X int;
DEBUG: switching to sequential query execution mode
DETAIL: Reference table "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
TRUNCATE on_update_fkey_table;
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
-- case 3.9: DDL to a reference table is followed by TRUNCATE
BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
DEBUG: validating foreign key constraint "fkey"
TRUNCATE on_update_fkey_table;
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table"
DEBUG: building index "transitive_reference_table_pkey" on table "transitive_reference_table" serially
DEBUG: validating foreign key constraint "fkey"
TRUNCATE on_update_fkey_table;
DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
ROLLBACK;
---------------------------------------------------------------------
--- Now, start testing the other way araound
@ -790,7 +770,6 @@ BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
DEBUG: validating foreign key constraint "fkey"
ERROR: cannot execute DDL on reference table "reference_table" because there was a parallel SELECT access to distributed table "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
@ -804,7 +783,6 @@ BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table"
DEBUG: building index "transitive_reference_table_pkey" on table "transitive_reference_table" serially
DEBUG: validating foreign key constraint "fkey"
ERROR: cannot execute DDL on reference table "transitive_reference_table" because there was a parallel SELECT access to distributed table "on_update_fkey_table" in the same transaction
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
@ -1402,7 +1380,6 @@ SET client_min_messages TO DEBUG1;
-- set the mode to sequential for the next operations
CREATE TABLE reference_table(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_pkey" for table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
SELECT create_reference_table('reference_table');
create_reference_table
---------------------------------------------------------------------
@ -1411,7 +1388,6 @@ SELECT create_reference_table('reference_table');
CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table_pkey" for table "distributed_table"
DEBUG: building index "distributed_table_pkey" on table "distributed_table" serially
SELECT create_distributed_table('distributed_table', 'id');
create_distributed_table
---------------------------------------------------------------------

View File

@ -274,7 +274,6 @@ TRUNCATE target_table;
SET citus.log_remote_commands TO true; SET client_min_messages TO DEBUG;
CREATE TABLE results AS SELECT max(-a), array_agg(mapped_key) FROM source_table GROUP BY a;
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: building index "pg_toast_xxxxx_index" on table "pg_toast_xxxxx" serially
NOTICE: issuing SELECT max((OPERATOR(pg_catalog.-) a)) AS max, array_agg(mapped_key) AS array_agg, a AS worker_column_3 FROM insert_select_repartition.source_table_4213601 source_table WHERE true GROUP BY a
NOTICE: issuing SELECT max((OPERATOR(pg_catalog.-) a)) AS max, array_agg(mapped_key) AS array_agg, a AS worker_column_3 FROM insert_select_repartition.source_table_4213602 source_table WHERE true GROUP BY a
NOTICE: issuing SELECT max((OPERATOR(pg_catalog.-) a)) AS max, array_agg(mapped_key) AS array_agg, a AS worker_column_3 FROM insert_select_repartition.source_table_4213603 source_table WHERE true GROUP BY a

View File

@ -935,13 +935,9 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-- test case for issue #3556
CREATE TABLE accounts (id text PRIMARY KEY);
DEBUG: building index "pg_toast_xxxxx_index" on table "pg_toast_xxxxx" serially
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "accounts_pkey" for table "accounts"
DEBUG: building index "accounts_pkey" on table "accounts" serially
CREATE TABLE stats (account_id text PRIMARY KEY, spent int);
DEBUG: building index "pg_toast_xxxxx_index" on table "pg_toast_xxxxx" serially
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "stats_pkey" for table "stats"
DEBUG: building index "stats_pkey" on table "stats" serially
SELECT create_distributed_table('accounts', 'id', colocate_with => 'none');
create_distributed_table
---------------------------------------------------------------------

View File

@ -14,7 +14,6 @@ SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE reference_table (key int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_pkey" for table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
SELECT create_reference_table('reference_table');
create_reference_table
---------------------------------------------------------------------
@ -23,7 +22,6 @@ SELECT create_reference_table('reference_table');
CREATE TABLE distributed_table (key int PRIMARY KEY, age bigint CHECK (age >= 10));
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table_pkey" for table "distributed_table"
DEBUG: building index "distributed_table_pkey" on table "distributed_table" serially
SELECT create_distributed_table('distributed_table','key');
create_distributed_table
---------------------------------------------------------------------
@ -38,7 +36,6 @@ DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
CREATE TABLE local_table (key int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "local_table_pkey" for table "local_table"
DEBUG: building index "local_table_pkey" on table "local_table" serially
INSERT INTO local_table SELECT * from generate_series(1, 10);
-- partitioned table
CREATE TABLE collections_list (

View File

@ -174,7 +174,7 @@ INSERT INTO composite_type_partitioned_table VALUES (123, '(123, 456)'::other_co
Node: host=localhost port=xxxxx dbname=regression
-> Insert on public.composite_type_partitioned_table_530003 (actual rows=0 loops=1)
-> Result (actual rows=1 loops=1)
Output: 123, '(123,456)'::test_composite_type
Output: xxxxxx
(9 rows)
SELECT run_command_on_coordinator_and_workers($cf$
@ -218,7 +218,7 @@ INSERT INTO composite_type_partitioned_table VALUES (123, '(456, 678)'::other_co
Node: host=localhost port=xxxxx dbname=regression
-> Insert on public.composite_type_partitioned_table_530000 (actual rows=0 loops=1)
-> Result (actual rows=1 loops=1)
Output: 123, '(456,678)'::test_composite_type
Output: xxxxxx
(9 rows)
-- create and distribute a table on enum type column

View File

@ -345,14 +345,14 @@ EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort (actual rows=50 loops=1)
Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
Output: xxxxxx
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
Sort Method: quicksort Memory: 27kB
-> HashAggregate (actual rows=50 loops=1)
Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
Output: xxxxxx
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
Output: remote_scan.l_quantity, remote_scan.count_quantity
Output: xxxxxx
Task Count: 2
Tuple data received from nodes: 780 bytes
Tasks Shown: One of 2
@ -361,48 +361,48 @@ Sort (actual rows=50 loops=1)
Tuple data received from node: 390 bytes
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate (actual rows=50 loops=1)
Output: l_quantity, count(*)
Output: xxxxxx
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem (actual rows=6000 loops=1)
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
Output: xxxxxx
-- Test query text output, with ANALYZE OFF
EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
SELECT l_quantity, count(*) count_quantity FROM lineitem
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
Sort
Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
Output: xxxxxx
Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
-> HashAggregate
Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
Output: xxxxxx
Group Key: remote_scan.l_quantity
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_quantity, remote_scan.count_quantity
Output: xxxxxx
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT l_quantity, count(*) AS count_quantity FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_quantity, count(*)
Output: xxxxxx
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
Output: xxxxxx
-- Test verbose
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
Aggregate
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
Output: xxxxxx
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM lineitem_290000 lineitem WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
Output: xxxxxx
-> Seq Scan on public.lineitem_290000 lineitem
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
Output: xxxxxx
-- Test join
EXPLAIN (COSTS FALSE)
SELECT * FROM lineitem
@ -525,40 +525,40 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
HAVING sum(l_quantity) > 100;
Aggregate
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
Output: xxxxxx
Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
Output: xxxxxx
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) AS worker_column_4 FROM lineitem_290000 lineitem WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity)
Output: xxxxxx
-> Seq Scan on public.lineitem_290000 lineitem
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
Output: xxxxxx
-- Test having without aggregate
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT l_quantity FROM lineitem
GROUP BY l_quantity
HAVING l_quantity > (100 * random());
HashAggregate
Output: remote_scan.l_quantity
Output: xxxxxx
Group Key: remote_scan.l_quantity
Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_quantity, remote_scan.worker_column_2
Output: xxxxxx
Task Count: 2
Tasks Shown: One of 2
-> Task
Query: SELECT l_quantity, l_quantity AS worker_column_2 FROM lineitem_290000 lineitem WHERE true GROUP BY l_quantity
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_quantity, l_quantity
Output: xxxxxx
Group Key: lineitem.l_quantity
-> Seq Scan on public.lineitem_290000 lineitem
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
Output: xxxxxx
-- Subquery pushdown tests with explain
EXPLAIN (COSTS OFF)
SELECT
@ -1395,26 +1395,26 @@ series AS (
SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey)
ORDER BY s;
Custom Scan (Citus Adaptive)
Output: remote_scan.l_orderkey
Output: xxxxxx
-> Distributed Subplan XXX_1
-> HashAggregate
Output: remote_scan.l_orderkey
Output: xxxxxx
Group Key: remote_scan.l_orderkey
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_orderkey
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT DISTINCT l_orderkey FROM lineitem_hash_part_360041 lineitem_hash_part WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_orderkey
Output: xxxxxx
Group Key: lineitem_hash_part.l_orderkey
-> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
Output: xxxxxx
-> Distributed Subplan XXX_2
-> Function Scan on pg_catalog.generate_series s
Output: s
Output: xxxxxx
Function Call: generate_series(1, 10)
Task Count: 1
Tasks Shown: All
@ -1422,19 +1422,19 @@ Custom Scan (Citus Adaptive)
Query: SELECT keys.l_orderkey FROM ((SELECT intermediate_result.s FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(s integer)) series JOIN (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) keys ON ((series.s OPERATOR(pg_catalog.=) keys.l_orderkey))) ORDER BY series.s
Node: host=localhost port=xxxxx dbname=regression
-> Merge Join
Output: intermediate_result_1.l_orderkey, intermediate_result.s
Output: xxxxxx
Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey)
-> Sort
Output: intermediate_result.s
Output: xxxxxx
Sort Key: intermediate_result.s
-> Function Scan on pg_catalog.read_intermediate_result intermediate_result
Output: intermediate_result.s
Output: xxxxxx
Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format)
-> Sort
Output: intermediate_result_1.l_orderkey
Output: xxxxxx
Sort Key: intermediate_result_1.l_orderkey
-> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1
Output: intermediate_result_1.l_orderkey
Output: xxxxxx
Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
SET citus.enable_cte_inlining TO true;
SELECT true AS valid FROM explain_json($$
@ -1755,7 +1755,7 @@ SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_t
4
(4 rows)
SELECT explain_analyze_output ~ 'Output: a, b' FROM worker_last_saved_explain_analyze();
SELECT explain_analyze_output ~ 'Output: xxxxxx
?column?
---------------------------------------------------------------------
t

View File

@ -435,7 +435,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id', 'hash');
-- not skipping validation would result in a distributed query, which emits debug messages
BEGIN;
SET LOCAL citus.enable_ddl_propagation TO off;
SET LOCAL client_min_messages TO DEBUG2;
SET LOCAL client_min_messages TO DEBUG1;
ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY (ref_id) REFERENCES referenced_table (id);
ABORT;
-- test foreign constraint creation

View File

@ -12,6 +12,8 @@
CREATE SCHEMA functions_in_joins;
SET search_path TO 'functions_in_joins';
SET citus.next_shard_id TO 2500000;
SET citus.replication_model to 'streaming';
SET citus.shard_replication_factor to 1;
CREATE TABLE table1 (id int, data int);
SELECT create_distributed_table('table1','id');
create_distributed_table
@ -38,9 +40,15 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta
CREATE FUNCTION add(integer, integer) RETURNS integer
AS 'SELECT $1 + $2;'
LANGUAGE SQL;
SELECT create_distributed_function('add(integer,integer)');
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC;
DEBUG: generating subplan XXX_1 for subquery SELECT sum FROM functions_in_joins.add(3, 5) sum(sum)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, sum.sum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum integer)) sum ON ((table1.id OPERATOR(pg_catalog.=) sum.sum))) ORDER BY table1.id
id | data | sum
---------------------------------------------------------------------
8 | 64 | 8

View File

@ -0,0 +1,250 @@
--
-- multi function in join queries aims to test the function calls that are
-- used in joins.
--
-- These functions are supposed to be executed on the worker and to ensure
-- that we wrap those functions inside (SELECT * FROM fnc()) sub queries.
--
-- We do not yet support those functions that:
-- - have lateral joins
-- - have WITH ORDINALITY clause
-- - are user-defined and immutable
CREATE SCHEMA functions_in_joins;
SET search_path TO 'functions_in_joins';
SET citus.next_shard_id TO 2500000;
SET citus.replication_model to 'streaming';
SET citus.shard_replication_factor to 1;
CREATE TABLE table1 (id int, data int);
SELECT create_distributed_table('table1','id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO table1
SELECT x, x*x
from generate_series(1, 100) as f (x);
-- Verbose messages for observing the subqueries that wrapped function calls
SET client_min_messages TO DEBUG1;
-- Check joins on a sequence
CREATE SEQUENCE numbers;
SELECT * FROM table1 JOIN nextval('numbers') n ON (id = n) ORDER BY id ASC;
DEBUG: generating subplan XXX_1 for subquery SELECT n FROM nextval('functions_in_joins.numbers'::regclass) n(n)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n))) ORDER BY table1.id
id | data | n
---------------------------------------------------------------------
1 | 1 | 1
(1 row)
-- Check joins of a function that returns a single integer
CREATE FUNCTION add(integer, integer) RETURNS integer
AS 'SELECT $1 + $2;'
LANGUAGE SQL;
SELECT create_distributed_function('add(integer,integer)');
DEBUG: switching to sequential query execution mode
DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands
create_distributed_function
---------------------------------------------------------------------
(1 row)
SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC;
DEBUG: generating subplan XXX_1 for subquery SELECT sum FROM functions_in_joins.add(3, 5) sum(sum)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, sum.sum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum integer)) sum ON ((table1.id OPERATOR(pg_catalog.=) sum.sum))) ORDER BY table1.id
id | data | sum
---------------------------------------------------------------------
8 | 64 | 8
(1 row)
-- Check join of plpgsql functions
-- a function returning a single integer
CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer AS $$
BEGIN
RETURN i + 1;
END;
$$ LANGUAGE plpgsql;
SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC;
DEBUG: generating subplan XXX_1 for subquery SELECT val FROM functions_in_joins.increment(2) val(val)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, val.val FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.val FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(val integer)) val ON ((table1.id OPERATOR(pg_catalog.=) val.val))) ORDER BY table1.id
id | data | val
---------------------------------------------------------------------
3 | 9 | 3
(1 row)
-- a function that returns a set of integers
CREATE OR REPLACE FUNCTION next_k_integers(IN first_value INTEGER,
IN k INTEGER DEFAULT 3,
OUT result INTEGER)
RETURNS SETOF INTEGER AS $$
BEGIN
RETURN QUERY SELECT x FROM generate_series(first_value, first_value+k-1) f(x);
END;
$$ LANGUAGE plpgsql;
SELECT *
FROM table1 JOIN next_k_integers(3,2) next_integers ON (id = next_integers.result)
ORDER BY id ASC;
DEBUG: generating subplan XXX_1 for subquery SELECT result FROM functions_in_joins.next_k_integers(3, 2) next_integers(result)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_integers.result FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.result FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(result integer)) next_integers ON ((table1.id OPERATOR(pg_catalog.=) next_integers.result))) ORDER BY table1.id
id | data | result
---------------------------------------------------------------------
3 | 9 | 3
4 | 16 | 4
(2 rows)
-- a function returning set of records
CREATE FUNCTION get_set_of_records() RETURNS SETOF RECORD AS $cmd$
SELECT x, x+1 FROM generate_series(0,4) f(x)
$cmd$
LANGUAGE SQL;
SELECT * FROM table1 JOIN get_set_of_records() AS t2(x int, y int) ON (id = x) ORDER BY id ASC;
DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM functions_in_joins.get_set_of_records() t2(x integer, y integer)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, t2.x, t2.y FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) t2 ON ((table1.id OPERATOR(pg_catalog.=) t2.x))) ORDER BY table1.id
id | data | x | y
---------------------------------------------------------------------
1 | 1 | 1 | 2
2 | 4 | 2 | 3
3 | 9 | 3 | 4
4 | 16 | 4 | 5
(4 rows)
-- a function returning table
CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text)
AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
LANGUAGE SQL;
SELECT f.* FROM table1 t JOIN dup(32) f ON (f1 = id);
DEBUG: generating subplan XXX_1 for subquery SELECT f1, f2 FROM functions_in_joins.dup(32) f(f1, f2)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM (functions_in_joins.table1 t JOIN (SELECT intermediate_result.f1, intermediate_result.f2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(f1 integer, f2 text)) f ON ((f.f1 OPERATOR(pg_catalog.=) t.id)))
f1 | f2
---------------------------------------------------------------------
32 | 32 is text
(1 row)
-- a stable function
CREATE OR REPLACE FUNCTION the_minimum_id()
RETURNS INTEGER STABLE AS 'SELECT min(id) FROM table1' LANGUAGE SQL;
SELECT * FROM table1 JOIN the_minimum_id() min_id ON (id = min_id);
DEBUG: generating subplan XXX_1 for subquery SELECT min_id FROM functions_in_joins.the_minimum_id() min_id(min_id)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, min_id.min_id FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.min_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min_id integer)) min_id ON ((table1.id OPERATOR(pg_catalog.=) min_id.min_id)))
id | data | min_id
---------------------------------------------------------------------
1 | 1 | 1
(1 row)
-- a built-in immutable function
SELECT * FROM table1 JOIN abs(100) as hundred ON (id = hundred) ORDER BY id ASC;
id | data | hundred
---------------------------------------------------------------------
100 | 10000 | 100
(1 row)
-- function joins inside a CTE
WITH next_row_to_process AS (
SELECT * FROM table1 JOIN nextval('numbers') n ON (id = n)
)
SELECT *
FROM table1, next_row_to_process
WHERE table1.data <= next_row_to_process.data
ORDER BY 1,2 ASC;
DEBUG: generating subplan XXX_1 for CTE next_row_to_process: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN nextval('functions_in_joins.numbers'::regclass) n(n) ON ((table1.id OPERATOR(pg_catalog.=) n.n)))
DEBUG: generating subplan XXX_1 for subquery SELECT n FROM nextval('functions_in_joins.numbers'::regclass) n(n)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, n.n FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.n FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(n bigint)) n ON ((table1.id OPERATOR(pg_catalog.=) n.n)))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, next_row_to_process.id, next_row_to_process.data, next_row_to_process.n FROM functions_in_joins.table1, (SELECT intermediate_result.id, intermediate_result.data, intermediate_result.n FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, data integer, n bigint)) next_row_to_process WHERE (table1.data OPERATOR(pg_catalog.<=) next_row_to_process.data) ORDER BY table1.id, table1.data
id | data | id | data | n
---------------------------------------------------------------------
1 | 1 | 2 | 4 | 2
2 | 4 | 2 | 4 | 2
(2 rows)
-- Multiple functions in an RTE
SELECT * FROM ROWS FROM (next_k_integers(5), next_k_integers(10)) AS f(a, b),
table1 WHERE id = a ORDER BY id ASC;
DEBUG: generating subplan XXX_1 for subquery SELECT a, b FROM ROWS FROM(functions_in_joins.next_k_integers(5), functions_in_joins.next_k_integers(10)) f(a, b)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.a, f.b, table1.id, table1.data FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) f(a, b), functions_in_joins.table1 WHERE (table1.id OPERATOR(pg_catalog.=) f.a) ORDER BY table1.id
a | b | id | data
---------------------------------------------------------------------
5 | 10 | 5 | 25
6 | 11 | 6 | 36
7 | 12 | 7 | 49
(3 rows)
-- Custom Type returning function used in a join
RESET client_min_messages;
CREATE TYPE min_and_max AS (
minimum INT,
maximum INT
);
SET client_min_messages TO DEBUG1;
CREATE OR REPLACE FUNCTION max_and_min () RETURNS
min_and_max AS $$
DECLARE
result min_and_max%rowtype;
begin
select into result min(data) as minimum, max(data) as maximum from table1;
return result;
end;
$$ language plpgsql;
SELECT * FROM table1 JOIN max_and_min() m ON (m.maximum = data OR m.minimum = data) ORDER BY 1,2,3,4;
DEBUG: generating subplan XXX_1 for subquery SELECT minimum, maximum FROM functions_in_joins.max_and_min() m(minimum, maximum)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, m.minimum, m.maximum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.minimum, intermediate_result.maximum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(minimum integer, maximum integer)) m ON (((m.maximum OPERATOR(pg_catalog.=) table1.data) OR (m.minimum OPERATOR(pg_catalog.=) table1.data)))) ORDER BY table1.id, table1.data, m.minimum, m.maximum
id | data | minimum | maximum
---------------------------------------------------------------------
1 | 1 | 1 | 10000
100 | 10000 | 1 | 10000
(2 rows)
-- The following tests will fail as we do not support all joins on
-- all kinds of functions
-- In other words, we cannot recursively plan the functions and hence
-- the query fails on the workers
SET client_min_messages TO ERROR;
\set VERBOSITY terse
-- function joins in CTE results can create lateral joins that are not supported
-- we execute the query within a function to consolidate the error messages
-- between different executors
CREATE FUNCTION raise_failed_execution_func_join(query text) RETURNS void AS $$
BEGIN
EXECUTE query;
EXCEPTION WHEN OTHERS THEN
IF SQLERRM LIKE 'failed to execute task%' THEN
RAISE 'Task failed to execute';
ELSIF SQLERRM LIKE '%does not exist%' THEN
RAISE 'Task failed to execute';
END IF;
END;
$$LANGUAGE plpgsql;
SELECT raise_failed_execution_func_join($$
WITH one_row AS (
SELECT * FROM table1 WHERE id=52
)
SELECT table1.id, table1.data
FROM one_row, table1, next_k_integers(one_row.id, 5) next_five_ids
WHERE table1.id = next_five_ids;
$$);
ERROR: Task failed to execute
-- a user-defined immutable function
CREATE OR REPLACE FUNCTION the_answer_to_life()
RETURNS INTEGER IMMUTABLE AS 'SELECT 42' LANGUAGE SQL;
SELECT raise_failed_execution_func_join($$
SELECT * FROM table1 JOIN the_answer_to_life() the_answer ON (id = the_answer);
$$);
ERROR: Task failed to execute
SELECT raise_failed_execution_func_join($$
SELECT *
FROM table1
JOIN next_k_integers(10,5) WITH ORDINALITY next_integers
ON (id = next_integers.result);
$$);
ERROR: Task failed to execute
-- WITH ORDINALITY clause
SELECT raise_failed_execution_func_join($$
SELECT *
FROM table1
JOIN next_k_integers(10,5) WITH ORDINALITY next_integers
ON (id = next_integers.result)
ORDER BY id ASC;
$$);
ERROR: Task failed to execute
RESET client_min_messages;
DROP SCHEMA functions_in_joins CASCADE;
NOTICE: drop cascades to 12 other objects
SET search_path TO DEFAULT;

View File

@ -285,18 +285,18 @@ Sort
EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx;
Aggregate
Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
Output: xxxxxx
Task Count: 16
Tasks Shown: One of 16
-> Task
Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM lineitem_mx_1220052 lineitem_mx WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
Output: xxxxxx
-> Seq Scan on public.lineitem_mx_1220052 lineitem_mx
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
Output: xxxxxx
-- Test join
EXPLAIN (COSTS FALSE)
SELECT * FROM lineitem_mx

View File

@ -1613,14 +1613,14 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
-> Hash Join
Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid))
-> Append
-> Seq Scan on partitioning_hash_join_test_0_1660133 partitioning_hash_join_test
-> Seq Scan on partitioning_hash_join_test_1_1660137 partitioning_hash_join_test_1
-> Seq Scan on partitioning_hash_join_test_2_1660141 partitioning_hash_join_test_2
-> Seq Scan on partitioning_hash_join_test_0_1660133 partitioning_hash_join_test_1
-> Seq Scan on partitioning_hash_join_test_1_1660137 partitioning_hash_join_test_2
-> Seq Scan on partitioning_hash_join_test_2_1660141 partitioning_hash_join_test_3
-> Hash
-> Append
-> Seq Scan on partitioning_hash_test_0_1660016 partitioning_hash_test
-> Seq Scan on partitioning_hash_test_1_1660020 partitioning_hash_test_1
-> Seq Scan on partitioning_hash_test_2_1660032 partitioning_hash_test_2
-> Seq Scan on partitioning_hash_test_0_1660016 partitioning_hash_test_1
-> Seq Scan on partitioning_hash_test_1_1660020 partitioning_hash_test_2
-> Seq Scan on partitioning_hash_test_2_1660032 partitioning_hash_test_3
(16 rows)
-- set partition-wise join on and parallel to off
@ -1651,20 +1651,20 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
Node: host=localhost port=xxxxx dbname=regression
-> Append
-> Hash Join
Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid))
-> Seq Scan on partitioning_hash_join_test_0_1660133 partitioning_hash_join_test
Hash Cond: ((partitioning_hash_join_test_1.id = partitioning_hash_test_1.id) AND (partitioning_hash_join_test_1.subid = partitioning_hash_test_1.subid))
-> Seq Scan on partitioning_hash_join_test_0_1660133 partitioning_hash_join_test_1
-> Hash
-> Seq Scan on partitioning_hash_test_0_1660016 partitioning_hash_test
-> Seq Scan on partitioning_hash_test_0_1660016 partitioning_hash_test_1
-> Hash Join
Hash Cond: ((partitioning_hash_test_1.id = partitioning_hash_join_test_1.id) AND (partitioning_hash_test_1.subid = partitioning_hash_join_test_1.subid))
-> Seq Scan on partitioning_hash_test_1_1660020 partitioning_hash_test_1
Hash Cond: ((partitioning_hash_test_2.id = partitioning_hash_join_test_2.id) AND (partitioning_hash_test_2.subid = partitioning_hash_join_test_2.subid))
-> Seq Scan on partitioning_hash_test_1_1660020 partitioning_hash_test_2
-> Hash
-> Seq Scan on partitioning_hash_join_test_1_1660137 partitioning_hash_join_test_1
-> Seq Scan on partitioning_hash_join_test_1_1660137 partitioning_hash_join_test_2
-> Hash Join
Hash Cond: ((partitioning_hash_join_test_2.id = partitioning_hash_test_2.id) AND (partitioning_hash_join_test_2.subid = partitioning_hash_test_2.subid))
-> Seq Scan on partitioning_hash_join_test_2_1660141 partitioning_hash_join_test_2
Hash Cond: ((partitioning_hash_join_test_3.id = partitioning_hash_test_3.id) AND (partitioning_hash_join_test_3.subid = partitioning_hash_test_3.subid))
-> Seq Scan on partitioning_hash_join_test_2_1660141 partitioning_hash_join_test_3
-> Hash
-> Seq Scan on partitioning_hash_test_2_1660032 partitioning_hash_test_2
-> Seq Scan on partitioning_hash_test_2_1660032 partitioning_hash_test_3
(21 rows)
-- note that partition-wise joins only work when partition key is in the join
@ -1682,14 +1682,14 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id)
-> Hash Join
Hash Cond: (partitioning_hash_join_test.id = partitioning_hash_test.id)
-> Append
-> Seq Scan on partitioning_hash_join_test_0_1660133 partitioning_hash_join_test
-> Seq Scan on partitioning_hash_join_test_1_1660137 partitioning_hash_join_test_1
-> Seq Scan on partitioning_hash_join_test_2_1660141 partitioning_hash_join_test_2
-> Seq Scan on partitioning_hash_join_test_0_1660133 partitioning_hash_join_test_1
-> Seq Scan on partitioning_hash_join_test_1_1660137 partitioning_hash_join_test_2
-> Seq Scan on partitioning_hash_join_test_2_1660141 partitioning_hash_join_test_3
-> Hash
-> Append
-> Seq Scan on partitioning_hash_test_0_1660016 partitioning_hash_test
-> Seq Scan on partitioning_hash_test_1_1660020 partitioning_hash_test_1
-> Seq Scan on partitioning_hash_test_2_1660032 partitioning_hash_test_2
-> Seq Scan on partitioning_hash_test_0_1660016 partitioning_hash_test_1
-> Seq Scan on partitioning_hash_test_1_1660020 partitioning_hash_test_2
-> Seq Scan on partitioning_hash_test_2_1660032 partitioning_hash_test_3
(16 rows)
-- reset partition-wise join

File diff suppressed because it is too large Load Diff

View File

@ -946,7 +946,7 @@ EXPLAIN (COSTS FALSE)
GROUP BY l_orderkey
ORDER BY 2
LIMIT 15;
QUERY PLAN
QUERY PLAN
---------------------------------------------------------------------
Limit
-> Sort
@ -958,12 +958,10 @@ EXPLAIN (COSTS FALSE)
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
-> HashAggregate
Group Key: l_orderkey
-> Sort
Sort Key: l_orderkey
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(15 rows)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(13 rows)
-- check the plan if the hash aggreate is disabled.
SET enable_hashagg TO off;
@ -973,7 +971,7 @@ EXPLAIN (COSTS FALSE)
GROUP BY l_orderkey
ORDER BY 2
LIMIT 15;
QUERY PLAN
QUERY PLAN
---------------------------------------------------------------------
Limit
-> Unique
@ -984,12 +982,10 @@ EXPLAIN (COSTS FALSE)
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
-> HashAggregate
Group Key: l_orderkey
-> Sort
Sort Key: l_orderkey
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(14 rows)
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(12 rows)
SET enable_hashagg TO on;
-- distinct on non-partition column with aggregate

File diff suppressed because it is too large Load Diff

View File

@ -712,68 +712,68 @@ EXPLAIN (COSTS FALSE, VERBOSE TRUE)
QUERY PLAN
---------------------------------------------------------------------
Limit
Output: remote_scan.user_id, remote_scan.sum
Output: xxxxxx
-> Sort
Output: remote_scan.user_id, remote_scan.sum
Output: xxxxxx
Sort Key: remote_scan.sum DESC, remote_scan.user_id DESC
-> Custom Scan (Citus Adaptive)
Output: remote_scan.user_id, remote_scan.sum
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT worker_column_1 AS user_id, worker_column_2 AS sum FROM (SELECT ftop.user_id AS worker_column_1, ftop.sum AS worker_column_2 FROM (SELECT user_id_1.user_id, sum(user_id_1.counter) AS sum FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER (PARTITION BY users_table.user_id) AS counter FROM public.users_table_1400256 users_table UNION SELECT events_table.user_id, sum(events_table.value_2) OVER (PARTITION BY events_table.user_id) AS counter FROM public.events_table_1400260 events_table) user_id_1 GROUP BY user_id_1.user_id UNION SELECT user_id_2.user_id, sum(user_id_2.counter) AS sum FROM (SELECT users_table.user_id, sum(users_table.value_2) OVER (PARTITION BY users_table.user_id) AS counter FROM public.users_table_1400256 users_table UNION SELECT events_table.user_id, sum(events_table.value_2) OVER (PARTITION BY events_table.user_id) AS counter FROM public.events_table_1400260 events_table) user_id_2 GROUP BY user_id_2.user_id) ftop) worker_subquery ORDER BY worker_column_2 DESC, worker_column_1 DESC LIMIT '5'::bigint
Node: host=localhost port=xxxxx dbname=regression
-> Limit
Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
Output: xxxxxx
-> Sort
Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
Output: xxxxxx
Sort Key: (sum((sum(users_table.value_2) OVER (?)))) DESC, users_table.user_id DESC
-> HashAggregate
Output: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
Output: xxxxxx
Group Key: users_table.user_id, (sum((sum(users_table.value_2) OVER (?))))
-> Append
-> HashAggregate
Output: users_table.user_id, sum((sum(users_table.value_2) OVER (?)))
Output: xxxxxx
Group Key: users_table.user_id
-> HashAggregate
Output: users_table.user_id, (sum(users_table.value_2) OVER (?))
Output: xxxxxx
Group Key: users_table.user_id, (sum(users_table.value_2) OVER (?))
-> Append
-> WindowAgg
Output: users_table.user_id, sum(users_table.value_2) OVER (?)
Output: xxxxxx
-> Sort
Output: users_table.user_id, users_table.value_2
Output: xxxxxx
Sort Key: users_table.user_id
-> Seq Scan on public.users_table_1400256 users_table
Output: users_table.user_id, users_table.value_2
Output: xxxxxx
-> WindowAgg
Output: events_table.user_id, sum(events_table.value_2) OVER (?)
Output: xxxxxx
-> Sort
Output: events_table.user_id, events_table.value_2
Output: xxxxxx
Sort Key: events_table.user_id
-> Seq Scan on public.events_table_1400260 events_table
Output: events_table.user_id, events_table.value_2
Output: xxxxxx
-> HashAggregate
Output: users_table_1.user_id, sum((sum(users_table_1.value_2) OVER (?)))
Output: xxxxxx
Group Key: users_table_1.user_id
-> HashAggregate
Output: users_table_1.user_id, (sum(users_table_1.value_2) OVER (?))
Output: xxxxxx
Group Key: users_table_1.user_id, (sum(users_table_1.value_2) OVER (?))
-> Append
-> WindowAgg
Output: users_table_1.user_id, sum(users_table_1.value_2) OVER (?)
Output: xxxxxx
-> Sort
Output: users_table_1.user_id, users_table_1.value_2
Output: xxxxxx
Sort Key: users_table_1.user_id
-> Seq Scan on public.users_table_1400256 users_table_1
Output: users_table_1.user_id, users_table_1.value_2
Output: xxxxxx
-> WindowAgg
Output: events_table_1.user_id, sum(events_table_1.value_2) OVER (?)
Output: xxxxxx
-> Sort
Output: events_table_1.user_id, events_table_1.value_2
Output: xxxxxx
Sort Key: events_table_1.user_id
-> Seq Scan on public.events_table_1400260 events_table_1
Output: events_table_1.user_id, events_table_1.value_2
Output: xxxxxx
(63 rows)
-- test with window functions which aren't pushed down

View File

@ -0,0 +1,184 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
\gset
\if :server_version_above_twelve
\else
\q
\endif
create schema test_pg13;
set search_path to test_pg13;
SET citus.shard_replication_factor to 1;
SET citus.shard_count to 2;
SET citus.next_shard_id TO 65000;
CREATE TABLE dist_table (name char, age int);
CREATE INDEX name_index on dist_table(name);
SELECT create_distributed_table('dist_table', 'name');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET client_min_messages to DEBUG1;
SET citus.log_remote_commands to ON;
-- make sure vacuum parallel doesn't error out
VACUUM (PARALLEL 2) dist_table;
NOTICE: issuing VACUUM (PARALLEL 2) test_pg13.dist_table_65000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (PARALLEL 2) test_pg13.dist_table_65001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (PARALLEL 0) dist_table;
NOTICE: issuing VACUUM (PARALLEL 0) test_pg13.dist_table_65000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (PARALLEL 0) test_pg13.dist_table_65001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- This should error out since -5 is not valid.
VACUUM (PARALLEL -5) dist_table;
ERROR: parallel vacuum degree must be between 0 and 1024
-- This should error out since no number is given
VACUUM (PARALLEL) dist_table;
ERROR: parallel option requires a value between 0 and 1024
RESET client_min_messages;
RESET citus.log_remote_commands;
-- test alter table alter column drop expression
CREATE TABLE generated_col_table(a int, b int GENERATED ALWAYS AS (a * 10) STORED);
SELECT create_distributed_table('generated_col_table', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO generated_col_table VALUES (1);
-- Make sure that we currently error out
ALTER TABLE generated_col_table ALTER COLUMN b DROP EXPRESSION;
ERROR: alter table command is currently unsupported
DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ATTACH|DETACH PARTITION and TYPE subcommands are supported.
-- alter view rename column works fine
CREATE VIEW v AS SELECT * FROM dist_table;
ALTER VIEW v RENAME age to new_age;
SELECT * FROM v;
name | new_age
---------------------------------------------------------------------
(0 rows)
-- row suffix notation works fine
CREATE TABLE ab (a int, b int);
SELECT create_distributed_table('ab','a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO ab SELECT i, 2 * i FROM generate_series(1,20)i;
SELECT * FROM ab WHERE (ROW(a,b)).f1 > (ROW(10,30)).f1 ORDER BY 1,2;
a | b
---------------------------------------------------------------------
11 | 22
12 | 24
13 | 26
14 | 28
15 | 30
16 | 32
17 | 34
18 | 36
19 | 38
20 | 40
(10 rows)
SELECT * FROM ab WHERE (ROW(a,b)).f2 > (ROW(0,38)).f2 ORDER BY 1,2;
a | b
---------------------------------------------------------------------
20 | 40
(1 row)
-- test normalized
CREATE TABLE text_table (name text);
SELECT create_distributed_table('text_table', 'name');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO text_table VALUES ('abc');
-- not normalized
INSERT INTO text_table VALUES (U&'\0061\0308bc');
SELECT name IS NORMALIZED FROM text_table ORDER BY 1;
is_normalized
---------------------------------------------------------------------
f
t
(2 rows)
SELECT is_normalized(name) FROM text_table ORDER BY 1;
is_normalized
---------------------------------------------------------------------
f
t
(2 rows)
SELECT normalize(name) FROM text_table ORDER BY 1;
normalize
---------------------------------------------------------------------
abc
äbc
(2 rows)
INSERT INTO text_table VALUES (normalize(U&'\0061\0308bc', NFC));
-- test unicode escape
-- insert the word 'data' with unicode escapes
INSERT INTO text_table VALUES(U&'d\0061t\+000061');
-- insert the word слон
INSERT INTO text_table VALUES(U&'\0441\043B\043E\043D');
SELECT * FROM text_table ORDER BY 1;
name
---------------------------------------------------------------------
abc
äbc
data
äbc
слон
(5 rows)
-- Test that we don't propagate base types
CREATE TYPE myvarchar;
CREATE FUNCTION myvarcharin(cstring, oid, integer) RETURNS myvarchar
LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharin';
NOTICE: return type myvarchar is only a shell
CREATE FUNCTION myvarcharout(myvarchar) RETURNS cstring
LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharout';
NOTICE: argument type myvarchar is only a shell
CREATE TYPE myvarchar (
input = myvarcharin,
output = myvarcharout,
alignment = integer,
storage = main
);
CREATE TABLE my_table (a int, b myvarchar);
-- this will error because it seems that we don't propagate the "BASE TYPES"
-- Alter table also errors out so this doesn't seem to apply to us:
-- """Add ALTER TYPE options useful for extensions,
-- like TOAST and I/O functions control (Tomas Vondra, Tom Lane)"""
SELECT create_distributed_table('my_table', 'a');
ERROR: type "test_pg13.myvarchar" does not exist
CONTEXT: while executing command on localhost:xxxxx
CREATE TABLE test_table(a int, b tsvector);
SELECT create_distributed_table('test_table', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- we currently don't support this
CREATE INDEX test_table_index ON test_table USING gist (b tsvector_ops(siglen = 100));
ERROR: citus currently doesn't support operator class parameters in indexes
drop schema test_pg13 cascade;
NOTICE: drop cascades to 10 other objects
DETAIL: drop cascades to table dist_table
drop cascades to table generated_col_table
drop cascades to view v
drop cascades to table ab
drop cascades to table text_table
drop cascades to function myvarcharout(myvarchar)
drop cascades to type myvarchar
drop cascades to function myvarcharin(cstring,oid,integer)
drop cascades to table my_table
drop cascades to table test_table

View File

@ -0,0 +1,6 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
\gset
\if :server_version_above_twelve
\else
\q

View File

@ -226,16 +226,17 @@ $$);
(1 row)
CREATE EXTENSION dict_int FROM unpackaged;
ERROR: CREATE EXTENSION ... FROM is no longer supported
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57637,t,0)
(1 row)
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.0)
(localhost,57637,t,"")
(1 row)
-- and add the other node
@ -264,15 +265,15 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.0)
(localhost,57638,t,1.0)
(localhost,57637,t,"")
(localhost,57638,t,"")
(2 rows)
-- and similarly check for the reference table

View File

@ -0,0 +1,444 @@
CREATE SCHEMA "extension'test";
-- use a schema name with escape character
SET search_path TO "extension'test";
SET client_min_messages TO WARNING;
-- create an extension on the given search_path
-- the extension is on contrib, so should be available for the regression tests
CREATE EXTENSION seg;
-- make sure that both the schema and the extension is distributed
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
count
---------------------------------------------------------------------
1
(1 row)
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test');
count
---------------------------------------------------------------------
1
(1 row)
CREATE TABLE test_table (key int, value seg);
SELECT create_distributed_table('test_table', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- make sure that the table is also distributed now
SELECT count(*) from pg_dist_partition where logicalrelid='extension''test.test_table'::regclass;
count
---------------------------------------------------------------------
1
(1 row)
CREATE TYPE two_segs AS (seg_1 seg, seg_2 seg);
-- verify that the type that depends on the extension is also marked as distributed
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_type WHERE typname = 'two_segs' AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test'));
count
---------------------------------------------------------------------
1
(1 row)
-- now try to run CREATE EXTENSION within a transaction block, all should work fine
BEGIN;
CREATE EXTENSION isn WITH SCHEMA public;
-- now, try create a reference table relying on the data types
-- this should not succeed as we do not distribute extension commands within transaction blocks
CREATE TABLE dist_table (key int, value public.issn);
SELECT create_distributed_table('dist_table', 'key');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- we can even run queries (sequentially) over the distributed table
SELECT * FROM dist_table;
key | value
---------------------------------------------------------------------
(0 rows)
INSERT INTO dist_table VALUES (1, public.issn('1436-4522'));
INSERT INTO dist_table SELECT * FROM dist_table RETURNING *;
key | value
---------------------------------------------------------------------
1 | 1436-4522
(1 row)
COMMIT;
-- make sure that the extension is distributed even if we run create extension in a transaction block
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn');
count
---------------------------------------------------------------------
1
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
CREATE TABLE ref_table (a public.issn);
-- now, create a reference table relying on the data types
SELECT create_reference_table('ref_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- now, drop the extension, recreate it with an older version and update it to latest version
DROP EXTENSION isn CASCADE;
CREATE EXTENSION isn WITH VERSION "1.1";
-- before updating the version, ensure the current version
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.1)
(localhost,57638,t,1.1)
(2 rows)
-- now, update to a newer version
ALTER EXTENSION isn UPDATE TO '1.2';
-- show that ALTER EXTENSION is propagated
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.2)
(localhost,57638,t,1.2)
(2 rows)
-- before changing the schema, ensure the current schema
SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,extension'test)
(localhost,57638,t,extension'test)
(2 rows)
-- now change the schema
ALTER EXTENSION isn SET SCHEMA public;
-- switch back to public schema as we set extension's schema to public
SET search_path TO public;
-- make sure that the extension is distributed
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn');
count
---------------------------------------------------------------------
1
(1 row)
-- show that the ALTER EXTENSION command is propagated
SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,public)
(localhost,57638,t,public)
(2 rows)
-- drop the extension finally
DROP EXTENSION isn CASCADE;
-- now make sure that the reference tables depending on an extension can be successfully created.
-- we should also ensure that we replicate this reference table (and hence the extension)
-- to new nodes after calling master_activate_node.
-- now, first drop seg and existing objects before next test
DROP EXTENSION seg CASCADE;
-- but as we have only 2 ports in postgresql tests, let's remove one of the nodes first
-- before remove, first remove the existing relations (due to the other tests)
DROP SCHEMA "extension'test" CASCADE;
SELECT 1 from master_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- then create the extension
CREATE EXTENSION seg;
-- show that the extension is created on existing worker
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(1 row)
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.3)
(1 row)
-- now create the reference table
CREATE TABLE ref_table_2 (x seg);
SELECT create_reference_table('ref_table_2');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- we also add an old style extension from before extensions which we upgrade to an extension
-- by exercising it before the add node we verify it will create the extension (without upgrading)
-- it on the new worker as well. For this we use the dict_int extension which is in contrib,
-- supports FROM unpackaged, and is relatively small
-- create objects for dict_int manually so we can upgrade from unpackaged
CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init );
CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template);
COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers';
SELECT run_command_on_workers($$
CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE FUNCTION")
(1 row)
SELECT run_command_on_workers($$
CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE FUNCTION")
(1 row)
SELECT run_command_on_workers($$
CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init );
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE TEXT SEARCH TEMPLATE")
(1 row)
SELECT run_command_on_workers($$
CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template);
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE TEXT SEARCH DICTIONARY")
(1 row)
SELECT run_command_on_workers($$
COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers';
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,COMMENT)
(1 row)
CREATE EXTENSION dict_int FROM unpackaged;
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(1 row)
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.0)
(1 row)
-- and add the other node
SELECT 1 from master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- show that the extension is created on both existing and new node
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.3)
(localhost,57638,t,1.3)
(2 rows)
-- check for the unpackaged extension to be created correctly
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'dict_int'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.0)
(localhost,57638,t,1.0)
(2 rows)
-- and similarly check for the reference table
select count(*) from pg_dist_partition where partmethod='n' and logicalrelid='ref_table_2'::regclass;
count
---------------------------------------------------------------------
1
(1 row)
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='ref_table_2'::regclass;
count
---------------------------------------------------------------------
1
(1 row)
DROP TABLE ref_table_2;
-- now test create extension in another transaction block but rollback this time
BEGIN;
CREATE EXTENSION isn WITH VERSION '1.1' SCHEMA public;
ROLLBACK;
-- at the end of the transaction block, we did not create isn extension in coordinator or worker nodes as we rollback'ed
-- make sure that the extension is not distributed
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn');
count
---------------------------------------------------------------------
0
(1 row)
-- and the extension does not exist on workers
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
-- give a notice for the following commands saying that it is not
-- propagated to the workers. the user should run it manually on the workers
CREATE TABLE t1 (A int);
CREATE VIEW v1 AS select * from t1;
ALTER EXTENSION seg ADD VIEW v1;
ALTER EXTENSION seg DROP VIEW v1;
DROP VIEW v1;
DROP TABLE t1;
-- drop multiple extensions at the same time
CREATE EXTENSION isn WITH VERSION '1.1' SCHEMA public;
-- let's create another extension locally
set citus.enable_ddl_propagation to 'off';
CREATE EXTENSION pg_buffercache;
set citus.enable_ddl_propagation to 'on';
DROP EXTENSION pg_buffercache, isn CASCADE;
SELECT count(*) FROM pg_extension WHERE extname IN ('pg_buffercache', 'isn');
count
---------------------------------------------------------------------
0
(1 row)
-- drop extension should just work
DROP EXTENSION seg CASCADE;
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
count
---------------------------------------------------------------------
0
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'seg'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
-- make sure that the extension is not available anymore as a distributed object
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn'));
count
---------------------------------------------------------------------
0
(1 row)
CREATE SCHEMA "extension'test";
SET search_path TO "extension'test";
-- check restriction for sequential execution
-- enable it and see that create command errors but continues its execution by changing citus.multi_shard_modify_mode TO 'off
BEGIN;
CREATE TABLE some_random_table (a int);
SELECT create_distributed_table('some_random_table', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE EXTENSION seg;
CREATE TABLE some_random_table_2 (a int, b seg);
SELECT create_distributed_table('some_random_table_2', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
ROLLBACK;
-- show that the CREATE EXTENSION command propagated even if the transaction
-- block is rolled back; that's a shortcoming of the dependency creation logic
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1.3)
(localhost,57638,t,1.3)
(2 rows)
-- drop the schema and all the objects
DROP SCHEMA "extension'test" CASCADE;
-- recreate for the next tests
CREATE SCHEMA "extension'test";
-- use a schema name with escape character
SET search_path TO "extension'test";
-- remove the node, we'll add back again
SELECT 1 from master_remove_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- now, create a type that depends on another type, which
-- finally depends on an extension
BEGIN;
SET citus.shard_replication_factor TO 1;
CREATE EXTENSION seg;
CREATE EXTENSION isn;
CREATE TYPE test_type AS (a int, b seg);
CREATE TYPE test_type_2 AS (a int, b test_type);
CREATE TABLE t2 (a int, b test_type_2, c issn);
SELECT create_distributed_table('t2', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TYPE test_type_3 AS (a int, b test_type, c issn);
CREATE TABLE t3 (a int, b test_type_3);
SELECT create_reference_table('t3');
create_reference_table
---------------------------------------------------------------------
(1 row)
COMMIT;
-- add the node back
SELECT 1 from master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- make sure that both extensions are created on both nodes
SELECT count(*) FROM citus.pg_dist_object WHERE objid IN (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn'));
count
---------------------------------------------------------------------
2
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,2)
(localhost,57638,t,2)
(2 rows)
-- drop the schema and all the objects
DROP SCHEMA "extension'test" CASCADE;

View File

@ -554,14 +554,14 @@ $$);
coordinator_plan
---------------------------------------------------------------------
Sort
Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, (any_value(remote_scan.v1)), (any_value(remote_scan.v2)), ((any_value(remote_scan.v3) || '_notgrouped'::text)), remote_scan.va1, remote_scan.va2, remote_scan.va3, (COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint))
Output: xxxxxx
Sort Key: remote_scan.k1
-> HashAggregate
Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, any_value(remote_scan.v1), any_value(remote_scan.v2), (any_value(remote_scan.v3) || '_notgrouped'::text), remote_scan.va1, remote_scan.va2, remote_scan.va3, COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)
Output: xxxxxx
Group Key: remote_scan.k1, remote_scan.va1
Filter: ((length(remote_scan.worker_column_11) + length(any_value(remote_scan.worker_column_12))) < length((any_value(remote_scan.worker_column_13) || '_append'::text)))
-> Custom Scan (Citus Adaptive)
Output: remote_scan.k1, remote_scan.k2, remote_scan.k3, remote_scan.v1, remote_scan.v2, remote_scan.v3, remote_scan.va1, remote_scan.va2, remote_scan.va3, remote_scan.count, remote_scan.worker_column_11, remote_scan.worker_column_12, remote_scan.worker_column_13
Output: xxxxxx
Task Count: 4
(10 rows)

View File

@ -42,18 +42,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest(remote_scan.tdigest)
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(latency, 100)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest(value, compression)
@ -64,17 +64,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest(latency, 100)
Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest(value, compression)
@ -85,20 +85,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Output: remote_scan.b, tdigest(remote_scan.tdigest)
Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
Output: remote_scan.b, remote_scan.tdigest
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: b, tdigest(latency, 100)
Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(15 rows)
-- explain no grouping to verify partially pushed down for tdigest_percentile(value, compression, quantile)
@ -108,18 +108,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(latency, 100)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_percentile(value, compression, quantile)
@ -130,17 +130,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile(latency, 100, '0.99'::double precision) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest_percentile(latency, 100, '0.99'::double precision)
Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile(value, compression, quantile)
@ -151,20 +151,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Output: remote_scan.b, tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
Output: remote_scan.b, remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: b, tdigest(latency, 100)
Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(15 rows)
-- explain no grouping to verify partially pushed down for tdigest_percentile(value, compression, quantiles[])
@ -174,18 +174,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(latency, 100)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_percentile(value, compression, quantiles[])
@ -196,17 +196,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile(latency, 100, '{0.99,0.95}'::double precision[]) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest_percentile(latency, 100, '{0.99,0.95}'::double precision[])
Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile(value, compression, quantiles[])
@ -217,20 +217,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Output: remote_scan.b, tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
Output: remote_scan.b, remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: b, tdigest(latency, 100)
Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(15 rows)
-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
@ -240,18 +240,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(latency, 100)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
@ -262,17 +262,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile_of(latency, 100, '9000'::double precision) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest_percentile_of(latency, 100, '9000'::double precision)
Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
@ -283,20 +283,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Output: remote_scan.b, tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
Output: remote_scan.b, remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: b, tdigest(latency, 100)
Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(15 rows)
-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
@ -306,18 +306,18 @@ FROM latencies;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(latency, 100)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
@ -328,17 +328,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile_of(latency, 100, '{9000,9500}'::double precision[]) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest_percentile_of(latency, 100, '{9000,9500}'::double precision[])
Output: xxxxxx
Group Key: latencies.a
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(12 rows)
-- explain grouping by non-distribution column is partially pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
@ -349,20 +349,20 @@ GROUP BY b;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Output: remote_scan.b, tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
Output: xxxxxx
Group Key: remote_scan.b
-> Custom Scan (Citus Adaptive)
Output: remote_scan.b, remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT b, public.tdigest(latency, 100) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_20070000 latencies WHERE true GROUP BY b
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: b, tdigest(latency, 100)
Output: xxxxxx
Group Key: latencies.b
-> Seq Scan on tdigest_aggregate_support.latencies_20070000 latencies
Output: a, b, latency
Output: xxxxxx
(15 rows)
-- verifying results - should be stable due to seed while inserting the data, if failure due to data these queries could be removed or check for certain ranges
@ -413,18 +413,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest(remote_scan.tdigest)
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(tdigest)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest(tdigest)
@ -435,17 +435,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest(tdigest) AS tdigest FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest(tdigest)
Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(12 rows)
-- explain no grouping to verify partially pushed down for tdigest_percentile(tdigest, quantile)
@ -455,18 +455,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest_percentile(remote_scan.tdigest_percentile, '0.99'::double precision)
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(tdigest)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_percentile(tdigest, quantile)
@ -477,17 +477,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile(tdigest, '0.99'::double precision) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest_percentile(tdigest, '0.99'::double precision)
Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(12 rows)
-- explain no grouping to verify partially pushed down for tdigest_percentile(value, compression, quantiles[])
@ -497,18 +497,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest_percentile(remote_scan.tdigest_percentile, '{0.99,0.95}'::double precision[])
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(tdigest)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_percentile(value, compression, quantiles[])
@ -519,17 +519,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest_percentile
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile(tdigest, '{0.99,0.95}'::double precision[]) AS tdigest_percentile FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest_percentile(tdigest, '{0.99,0.95}'::double precision[])
Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(12 rows)
-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
@ -539,18 +539,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '9000'::double precision)
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(tdigest)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_value)
@ -561,17 +561,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile_of(tdigest, '9000'::double precision) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest_percentile_of(tdigest, '9000'::double precision)
Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(12 rows)
-- explain no grouping to verify partially pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
@ -581,18 +581,18 @@ FROM latencies_rollup;
QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: tdigest_percentile_of(remote_scan.tdigest_percentile_of, '{9000,9500}'::double precision[])
Output: xxxxxx
-> Custom Scan (Citus Adaptive)
Output: remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT public.tdigest(tdigest) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
Output: tdigest(tdigest)
Output: xxxxxx
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(13 rows)
-- explain grouping by distribution column is completely pushed down for tdigest_percentile_of(value, compression, hypothetical_values[])
@ -603,17 +603,17 @@ GROUP BY a;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.a, remote_scan.tdigest_percentile_of
Output: xxxxxx
Task Count: 4
Tasks Shown: One of 4
-> Task
Query: SELECT a, public.tdigest_percentile_of(tdigest, '{9000,9500}'::double precision[]) AS tdigest_percentile_of FROM tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup WHERE true GROUP BY a
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: a, tdigest_percentile_of(tdigest, '{9000,9500}'::double precision[])
Output: xxxxxx
Group Key: latencies_rollup.a
-> Seq Scan on tdigest_aggregate_support.latencies_rollup_20070004 latencies_rollup
Output: a, tdigest
Output: xxxxxx
(12 rows)
-- verifying results - should be stable due to seed while inserting the data, if failure due to data these queries could be removed or check for certain ranges

View File

@ -154,14 +154,6 @@ SELECT
HAVING count(distinct l_suppkey) > 1550
ORDER BY 1, 2 DESC;
EXPLAIN (COSTS false, VERBOSE true)
SELECT
l_shipmode, count(distinct l_partkey)
FROM lineitem_hash
GROUP BY l_shipmode
HAVING count(distinct l_suppkey) > 1550
ORDER BY 1, 2 DESC;
-- count distinct is supported on single table subqueries
SELECT *
FROM (

View File

@ -80,7 +80,7 @@ test: set_operation_and_local_tables
test: subqueries_deep subquery_view subquery_partitioning subquery_complex_target_list subqueries_not_supported subquery_in_where
test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order
test: subquery_prepared_statements pg12 cte_inline
test: subquery_prepared_statements pg12 cte_inline pg13
# ----------
# Miscellaneous tests to check our query planning behavior

View File

@ -355,39 +355,6 @@ SELECT
TRUCK | 1757
(7 rows)
EXPLAIN (COSTS false, VERBOSE true)
SELECT
l_shipmode, count(distinct l_partkey)
FROM lineitem_hash
GROUP BY l_shipmode
HAVING count(distinct l_suppkey) > 1550
ORDER BY 1, 2 DESC;
QUERY PLAN
---------------------------------------------------------------------
Sort
Output: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count))
Sort Key: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count)) DESC
-> GroupAggregate
Output: remote_scan.l_shipmode, count(DISTINCT remote_scan.count)
Group Key: remote_scan.l_shipmode
Filter: (count(DISTINCT remote_scan.worker_column_3) > 1550)
-> Sort
Output: remote_scan.l_shipmode, remote_scan.count, remote_scan.worker_column_3
Sort Key: remote_scan.l_shipmode
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_shipmode, remote_scan.count, remote_scan.worker_column_3
Task Count: 8
Tasks Shown: One of 8
-> Task
Query: SELECT l_shipmode, l_partkey AS count, l_suppkey AS worker_column_3 FROM lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_shipmode, l_partkey, l_suppkey
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Output: l_shipmode, l_partkey, l_suppkey
Group Key: lineitem_hash.l_shipmode, lineitem_hash.l_partkey, lineitem_hash.l_suppkey
-> Seq Scan on public.lineitem_hash_240000 lineitem_hash
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
(22 rows)
-- count distinct is supported on single table subqueries
SELECT *
FROM (

View File

@ -44,11 +44,6 @@ step "s1-add-second-worker"
SELECT 1 FROM master_add_node('localhost', 57638);
}
step "s1-remove-second-worker"
{
SELECT master_remove_node('localhost', 57638);
}
step "s1-drop-reference-table"
{
DROP TABLE test_reference_table;

View File

@ -45,11 +45,6 @@ step "s1-remove-node-1"
SELECT * FROM master_remove_node('localhost', 57637);
}
step "s1-remove-node-2"
{
SELECT * FROM master_remove_node('localhost', 57638);
}
step "s1-abort"
{
ABORT;
@ -67,11 +62,6 @@ step "s1-show-nodes"
session "s2"
step "s2-begin"
{
BEGIN;
}
step "s2-add-node-1"
{
SELECT 1 FROM master_add_node('localhost', 57637);
@ -102,11 +92,6 @@ step "s2-remove-node-2"
SELECT * FROM master_remove_node('localhost', 57638);
}
step "s2-commit"
{
COMMIT;
}
// session 1 adds a node, session 2 removes it, should be ok
permutation "s1-begin" "s1-add-node-1" "s2-remove-node-1" "s1-commit" "s1-show-nodes"
// add a different node from 2 sessions, should be ok

View File

@ -40,7 +40,6 @@ step "s1-ddl-drop-index" { DROP INDEX append_copy_index; }
step "s1-ddl-add-column" { ALTER TABLE append_copy ADD new_column int DEFAULT 0; }
step "s1-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; }
step "s1-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; }
step "s1-ddl-unique-constraint" { ALTER TABLE append_copy ADD CONSTRAINT append_copy_unique UNIQUE(id); }
step "s1-table-size" { SELECT citus_total_relation_size('append_copy'); }
step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM append_copy WHERE id <= 4;'); }
step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
@ -54,7 +53,6 @@ step "s1-commit" { COMMIT; }
// session 2
session "s2"
step "s2-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
step "s2-copy-additional-column" { COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; }
step "s2-router-select" { SELECT * FROM append_copy WHERE id = 1; }
step "s2-real-time-select" { SELECT * FROM append_copy ORDER BY 1, 2; }
step "s2-adaptive-select"

View File

@ -21,11 +21,6 @@ step "s1-begin"
BEGIN;
}
step "s1-commit"
{
COMMIT;
}
step "s1-rollback"
{
ROLLBACK;

View File

@ -79,11 +79,6 @@ step "s2-select-for-update"
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM copy_table WHERE id=5 FOR UPDATE');
}
step "s2-coordinator-create-index-concurrently"
{
CREATE INDEX CONCURRENTLY copy_table_index ON copy_table(id);
}
step "s2-commit-worker"
{
SELECT run_commands_on_session_level_connection_to_node('COMMIT');

View File

@ -38,16 +38,6 @@ step "s1-commit"
COMMIT;
}
step "s1-query-table"
{
SELECT * FROM dist_table;
}
step "s1-show-nodes"
{
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
}
step "s1-show-placements"
{
SELECT

Some files were not shown because too many files have changed in this diff Show More